From a6ba529317bd7151fdbda5e1494eccc6473aac8a Mon Sep 17 00:00:00 2001 From: Marty Zalega Date: Thu, 21 Jun 2018 15:26:02 +1000 Subject: [PATCH 1/5] Add EKS cluster auth token data resource This allows Terraform to authenticate with an EKS cluster via the Kubernetes provider: ```hcl resource "aws_eks_cluster" "foo" { name = "foo" } data "aws_eks_cluster_auth" "foo_auth" { name = "foo" } provider "kubernetes" { host = "${aws_eks_cluster.foo.endpoint}" cluster_ca_certificate = "${base64decode(aws_eks_cluster.foo.certificate_authority.0.data)}" token = "${data.aws_eks_cluster_auth.foo_auth.token}" } ``` The auth logic was extracted from https://github.com/heptio/aws-iam-authenticator because of lack of documentation from AWS. Basically, the token is a signed URL for the GetCallerIdentity action with a custom header. The URL is then base64 encoded and prefixed with vendor string. --- aws/data_source_aws_eks_cluster_auth.go | 67 ++++++++++++++++++++ aws/data_source_aws_eks_cluster_auth_test.go | 56 ++++++++++++++++ aws/provider.go | 1 + 3 files changed, 124 insertions(+) create mode 100644 aws/data_source_aws_eks_cluster_auth.go create mode 100644 aws/data_source_aws_eks_cluster_auth_test.go diff --git a/aws/data_source_aws_eks_cluster_auth.go b/aws/data_source_aws_eks_cluster_auth.go new file mode 100644 index 000000000000..8cf092d0a5d7 --- /dev/null +++ b/aws/data_source_aws_eks_cluster_auth.go @@ -0,0 +1,67 @@ +package aws + +import ( + "encoding/base64" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +const ( + clusterIDHeader = "x-k8s-aws-id" + v1Prefix = "k8s-aws-v1." +) + +func dataSourceAwsEksClusterAuth() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEksClusterAuthRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + + "duration": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + }, + + "token": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataSourceAwsEksClusterAuthRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).stsconn + name := d.Get("name").(string) + duration := d.Get("duration").(int) + + request, _ := conn.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) + request.HTTPRequest.Header.Add(clusterIDHeader, name) + + url, err := request.Presign(time.Duration(duration) * time.Second) + if err != nil { + return fmt.Errorf("error presigning request: %v", err) + } + + log.Printf("[DEBUG] Generated request: %s", url) + + token := v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(url)) + + d.SetId(time.Now().UTC().String()) + d.Set("token", token) + + return nil +} diff --git a/aws/data_source_aws_eks_cluster_auth_test.go b/aws/data_source_aws_eks_cluster_auth_test.go new file mode 100644 index 000000000000..1ea939ac2286 --- /dev/null +++ b/aws/data_source_aws_eks_cluster_auth_test.go @@ -0,0 +1,56 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSEksClusterAuthDataSource_basic(t *testing.T) { + dataSourceResourceName := "data.aws_eks_cluster_auth.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: 
testAccCheckAwsEksClusterAuthConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsEksClusterAuthToken(dataSourceResourceName), + ), + }, + }, + }) +} + +func testAccCheckAwsEksClusterAuthToken(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find EKS Cluster Auth resource: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("EKS Cluster Auth resource ID not set.") + } + + name := rs.Primary.Attributes["name"] + if expected := "foobar"; name != expected { + return fmt.Errorf("Incorrect EKS cluster name: expected %q, got %q", expected, name) + } + + if rs.Primary.Attributes["token"] == "" { + return fmt.Errorf("Token expected to not be nil") + } + + return nil + } +} + +const testAccCheckAwsEksClusterAuthConfig_basic = ` +data "aws_eks_cluster_auth" "test" { + name = "foobar" +} +` diff --git a/aws/provider.go b/aws/provider.go index 08ee425c12d9..e9668dd6db11 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -206,6 +206,7 @@ func Provider() terraform.ResourceProvider { "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), "aws_eip": dataSourceAwsEip(), "aws_eks_cluster": dataSourceAwsEksCluster(), + "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), "aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), "aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(), From fc7aac81c5a004910181f43318189db0322eb7b7 Mon Sep 17 00:00:00 2001 From: Michael Barrientos Date: Mon, 4 Feb 2019 01:11:02 -0800 Subject: [PATCH 2/5] Use aws libraries for aws_eks_cluster_auth --- aws/data_source_aws_eks_cluster_auth.go | 34 +++++--------------- aws/data_source_aws_eks_cluster_auth_test.go | 15 +++++++-- go.mod | 14 +++++++- go.sum | 28 ++++++++++++++-- 4 files changed, 60 insertions(+), 31 deletions(-) diff --git a/aws/data_source_aws_eks_cluster_auth.go b/aws/data_source_aws_eks_cluster_auth.go index 8cf092d0a5d7..7fda0c8ffbef 100644 --- a/aws/data_source_aws_eks_cluster_auth.go +++ b/aws/data_source_aws_eks_cluster_auth.go @@ -1,19 +1,12 @@ package aws import ( - "encoding/base64" "fmt" - "log" "time" - "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" -) - -const ( - clusterIDHeader = "x-k8s-aws-id" - v1Prefix = "k8s-aws-v1." 
+ "github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token" ) func dataSourceAwsEksClusterAuth() *schema.Resource { @@ -28,12 +21,6 @@ func dataSourceAwsEksClusterAuth() *schema.Resource { ValidateFunc: validation.NoZeroValues, }, - "duration": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - }, - "token": { Type: schema.TypeString, Computed: true, @@ -46,22 +33,17 @@ func dataSourceAwsEksClusterAuth() *schema.Resource { func dataSourceAwsEksClusterAuthRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).stsconn name := d.Get("name").(string) - duration := d.Get("duration").(int) - - request, _ := conn.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) - request.HTTPRequest.Header.Add(clusterIDHeader, name) - - url, err := request.Presign(time.Duration(duration) * time.Second) + generator, err := token.NewGenerator(false) if err != nil { - return fmt.Errorf("error presigning request: %v", err) + return fmt.Errorf("error getting token generator: %v", err) + } + token, err := generator.GetWithSTS(name, conn) + if err != nil { + return fmt.Errorf("error getting token: %v", err) } - - log.Printf("[DEBUG] Generated request: %s", url) - - token := v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(url)) d.SetId(time.Now().UTC().String()) - d.Set("token", token) + d.Set("token", token.Token) return nil } diff --git a/aws/data_source_aws_eks_cluster_auth_test.go b/aws/data_source_aws_eks_cluster_auth_test.go index 1ea939ac2286..b0ef514f2ee0 100644 --- a/aws/data_source_aws_eks_cluster_auth_test.go +++ b/aws/data_source_aws_eks_cluster_auth_test.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token" ) func TestAccAWSEksClusterAuthDataSource_basic(t *testing.T) { @@ -33,7 +34,7 @@ func testAccCheckAwsEksClusterAuthToken(n string) resource.TestCheckFunc { } if rs.Primary.ID == "" { - return fmt.Errorf("EKS Cluster Auth resource ID not set.") + return fmt.Errorf("EKS Cluster Auth resource ID not set") } name := rs.Primary.Attributes["name"] @@ -41,10 +42,20 @@ func testAccCheckAwsEksClusterAuthToken(n string) resource.TestCheckFunc { return fmt.Errorf("Incorrect EKS cluster name: expected %q, got %q", expected, name) } - if rs.Primary.Attributes["token"] == "" { + tok := rs.Primary.Attributes["token"] + if tok == "" { return fmt.Errorf("Token expected to not be nil") } + verifier := token.NewVerifier(name) + identity, err := verifier.Verify(tok) + if err != nil { + return fmt.Errorf("Error verifying token for cluster %q: %v", name, err) + } + if identity.ARN == "" { + return fmt.Errorf("Received unexpected blank ARN for token identity") + } + return nil } } diff --git a/go.mod b/go.mod index 975f1a4899b2..69f10da0592e 100644 --- a/go.mod +++ b/go.mod @@ -6,16 +6,18 @@ require ( github.com/apparentlymart/go-cidr v1.0.0 // indirect github.com/apparentlymart/go-textseg v1.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.16.25 + github.com/aws/aws-sdk-go v1.16.26 github.com/beevik/etree v1.0.1 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/boombuler/barcode v0.0.0-20180809052337-34fff276c74e // indirect github.com/davecgh/go-spew v1.1.1 + github.com/gogo/protobuf v1.2.0 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // 
indirect github.com/golang/protobuf v0.0.0-20171113180720-1e59b77b52bf // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/hashicorp/go-cleanhttp v0.5.0 github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86 // indirect github.com/hashicorp/go-hclog v0.0.0-20171005151751-ca137eb4b438 // indirect @@ -32,7 +34,9 @@ require ( github.com/hashicorp/vault v0.10.4 github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f // indirect github.com/jen20/awspolicyequivalence v1.0.0 + github.com/json-iterator/go v1.1.5 // indirect github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba // indirect + github.com/kubernetes-sigs/aws-iam-authenticator v0.3.1-0.20181019024009-82544ec86140 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 // indirect github.com/mattn/go-isatty v0.0.4 // indirect github.com/mitchellh/cli v0.0.0-20170803042910-8a539dbef410 // indirect @@ -41,8 +45,11 @@ require ( github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 // indirect github.com/mitchellh/go-wordwrap v1.0.0 // indirect github.com/mitchellh/hashstructure v1.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/posener/complete v0.0.0-20170730193024-f4461a52b632 // indirect github.com/pquerna/otp v0.0.0-20180813144649-be78767b3e39 + github.com/spf13/pflag v1.0.3 // indirect github.com/stretchr/testify v1.3.0 // indirect github.com/terraform-providers/terraform-provider-template v0.1.1 github.com/terraform-providers/terraform-provider-tls v0.1.0 @@ -55,5 +62,10 @@ require ( golang.org/x/text v0.0.0-20171024115504-6eab0e8f74e8 // indirect google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 // indirect google.golang.org/grpc v0.0.0-20171025225919-b5eab4ccac6d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.2.1 + k8s.io/apimachinery v0.0.0-20190204010555-a98ff070d70e // indirect + k8s.io/client-go v10.0.0+incompatible // indirect + k8s.io/klog v0.1.0 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect ) diff --git a/go.sum b/go.sum index e9f0fade636d..8cb1a702cc1a 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.16.25 h1:qhhvqnAlhmd5g35oFvAwFQzIKeM63hsXB+FaX9DdKFo= -github.com/aws/aws-sdk-go v1.16.25/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.16.26 h1:GWkl3rkRO/JGRTWoLLIqwf7AWC4/W/1hMOUZqmX0js4= +github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beevik/etree v1.0.1 h1:lWzdj5v/Pj1X360EV7bUudox5SRipy4qZLjY0rhb0ck= github.com/beevik/etree v1.0.1/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= @@ -23,12 +23,16 @@ github.com/boombuler/barcode v0.0.0-20180809052337-34fff276c74e/go.mod h1:paBWMc github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/protobuf v0.0.0-20171113180720-1e59b77b52bf h1:pFr/u+m8QUBMW/itAczltF3guNRAL7XDs5tD3f6nSD0= github.com/golang/protobuf v0.0.0-20171113180720-1e59b77b52bf/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= @@ -65,8 +69,12 @@ github.com/jen20/awspolicyequivalence v1.0.0 h1:jLRh4GRf0IfIpMm9/m+krLnjAda4NpI9 github.com/jen20/awspolicyequivalence v1.0.0/go.mod h1:PV1fS2xyHhCLp83vbgSMFr2drM4GzG61wkz+k4pOG3E= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba h1:NARVGAAgEXvoMeNPHhPFt1SBt1VMznA3Gnz9d0qj+co= github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/kubernetes-sigs/aws-iam-authenticator v0.3.1-0.20181019024009-82544ec86140 h1:AtXWrgewhHlLux0IAfHINCbkxkf47euklyallWlximw= +github.com/kubernetes-sigs/aws-iam-authenticator v0.3.1-0.20181019024009-82544ec86140/go.mod h1:ItxiN33Ho7Di8wiC4S4XqbH1NLF0DNdDWOd/5MI9gJU= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= @@ -87,12 +95,18 @@ github.com/mitchellh/mapstructure v1.0.0 h1:vVpGvMXJPqSDh2VYHF7gsfQj8Ncx+Xw5Y1KH github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 
v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v0.0.0-20170730193024-f4461a52b632 h1:/BqixcUMwSgBflSrORoggJ7Gh2SdP5uIuY0qG9Jkeys= github.com/posener/complete v0.0.0-20170730193024-f4461a52b632/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/otp v0.0.0-20180813144649-be78767b3e39 h1:XgT+Lddv+T5lOP+FauMBWuHqf3zJH4FkkrFrRGzrino= github.com/pquerna/otp v0.0.0-20180813144649-be78767b3e39/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nhgMk= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -120,5 +134,15 @@ google.golang.org/grpc v0.0.0-20171025225919-b5eab4ccac6d h1:K+wEnjFjaXJFIWQDwdu google.golang.org/grpc v0.0.0-20171025225919-b5eab4ccac6d/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/apimachinery v0.0.0-20190204010555-a98ff070d70e h1:7HZ9Pkl78EapVMHYAVjF1128N/w6ke+aPyo64M9I2Ds= +k8s.io/apimachinery v0.0.0-20190204010555-a98ff070d70e/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34= +k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk= +k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= From 2a351720de5ebd3032c6f247f25450ffc3eb05fd Mon Sep 17 00:00:00 2001 From: Michael Barrientos Date: Wed, 6 Feb 2019 09:40:16 -0800 Subject: [PATCH 3/5] aws_eks_cluster_auth add vendored libs, fixes, test fixes --- aws/data_source_aws_eks_cluster_auth.go | 1 - aws/data_source_aws_eks_cluster_auth_test.go | 13 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 35 - .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/codecommit/api.go | 12 + .../aws-sdk-go/service/codecommit/errors.go | 18 + .../interface.go | 79 - .../aws/aws-sdk-go/service/devicefarm/api.go | 267 +- .../aws-sdk-go/service/mediaconnect/api.go | 477 + .../aws/aws-sdk-go/service/medialive/api.go | 220 +- vendor/github.com/gogo/protobuf/AUTHORS | 15 + vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 + vendor/github.com/gogo/protobuf/LICENSE | 35 + .../github.com/gogo/protobuf/proto/Makefile | 43 + 
.../github.com/gogo/protobuf/proto/clone.go | 258 + .../gogo/protobuf/proto/custom_gogo.go | 39 + .../github.com/gogo/protobuf/proto/decode.go | 428 + .../github.com/gogo/protobuf/proto/discard.go | 350 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 49 + .../github.com/gogo/protobuf/proto/encode.go | 203 + .../gogo/protobuf/proto/encode_gogo.go | 33 + .../github.com/gogo/protobuf/proto/equal.go | 300 + .../gogo/protobuf/proto/extensions.go | 604 ++ .../gogo/protobuf/proto/extensions_gogo.go | 368 + vendor/github.com/gogo/protobuf/proto/lib.go | 987 ++ .../gogo/protobuf/proto/lib_gogo.go | 50 + .../gogo/protobuf/proto/message_set.go | 314 + .../gogo/protobuf/proto/pointer_reflect.go | 357 + .../protobuf/proto/pointer_reflect_gogo.go | 59 + .../gogo/protobuf/proto/pointer_unsafe.go | 308 + .../protobuf/proto/pointer_unsafe_gogo.go | 56 + .../gogo/protobuf/proto/properties.go | 608 ++ .../gogo/protobuf/proto/properties_gogo.go | 36 + .../gogo/protobuf/proto/skip_gogo.go | 119 + .../gogo/protobuf/proto/table_marshal.go | 3006 ++++++ .../gogo/protobuf/proto/table_marshal_gogo.go | 388 + .../gogo/protobuf/proto/table_merge.go | 657 ++ .../gogo/protobuf/proto/table_unmarshal.go | 2245 +++++ .../protobuf/proto/table_unmarshal_gogo.go | 385 + vendor/github.com/gogo/protobuf/proto/text.go | 928 ++ .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1018 ++ .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 49 + .../gogo/protobuf/proto/wrappers.go | 1888 ++++ .../gogo/protobuf/proto/wrappers_gogo.go | 113 + .../gogo/protobuf/sortkeys/sortkeys.go | 101 + vendor/github.com/google/gofuzz/.travis.yml | 13 + .../github.com/google/gofuzz/CONTRIBUTING.md | 67 + vendor/github.com/google/gofuzz/LICENSE | 202 + vendor/github.com/google/gofuzz/README.md | 71 + vendor/github.com/google/gofuzz/doc.go | 18 + vendor/github.com/google/gofuzz/fuzz.go | 487 + .../aws-iam-authenticator/LICENSE | 201 + .../aws-iam-authenticator/pkg/arn/arn.go | 69 + .../aws-iam-authenticator/pkg/token/token.go | 432 + vendor/gopkg.in/inf.v0/LICENSE | 28 + vendor/gopkg.in/inf.v0/dec.go | 615 ++ vendor/gopkg.in/inf.v0/rounder.go | 145 + vendor/k8s.io/apimachinery/LICENSE | 202 + .../apimachinery/pkg/api/resource/OWNERS | 16 + .../apimachinery/pkg/api/resource/amount.go | 299 + .../pkg/api/resource/generated.pb.go | 75 + .../pkg/api/resource/generated.proto | 88 + .../apimachinery/pkg/api/resource/math.go | 314 + .../apimachinery/pkg/api/resource/quantity.go | 738 ++ .../pkg/api/resource/quantity_proto.go | 284 + .../pkg/api/resource/scale_int.go | 95 + .../apimachinery/pkg/api/resource/suffix.go | 198 + .../pkg/api/resource/zz_generated.deepcopy.go | 27 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 31 + .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/conversion.go | 319 + .../apimachinery/pkg/apis/meta/v1/doc.go | 23 + .../apimachinery/pkg/apis/meta/v1/duration.go | 60 + .../pkg/apis/meta/v1/generated.pb.go | 8256 +++++++++++++++++ .../pkg/apis/meta/v1/generated.proto | 879 ++ .../pkg/apis/meta/v1/group_version.go | 148 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 246 + .../apimachinery/pkg/apis/meta/v1/labels.go | 55 + .../apimachinery/pkg/apis/meta/v1/meta.go | 170 + .../pkg/apis/meta/v1/micro_time.go | 183 + .../pkg/apis/meta/v1/micro_time_proto.go | 72 + .../apimachinery/pkg/apis/meta/v1/register.go | 97 + .../apimachinery/pkg/apis/meta/v1/time.go | 185 + .../pkg/apis/meta/v1/time_proto.go | 92 + 
.../apimachinery/pkg/apis/meta/v1/types.go | 1011 ++ .../meta/v1/types_swagger_doc_generated.go | 348 + .../apimachinery/pkg/apis/meta/v1/watch.go | 89 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 961 ++ .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 + .../apimachinery/pkg/conversion/converter.go | 898 ++ .../apimachinery/pkg/conversion/deep_equal.go | 36 + .../k8s.io/apimachinery/pkg/conversion/doc.go | 24 + .../apimachinery/pkg/conversion/helper.go | 39 + .../pkg/conversion/queryparams/convert.go | 198 + .../pkg/conversion/queryparams/doc.go | 19 + vendor/k8s.io/apimachinery/pkg/fields/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/fields.go | 62 + .../apimachinery/pkg/fields/requirements.go | 30 + .../apimachinery/pkg/fields/selector.go | 476 + vendor/k8s.io/apimachinery/pkg/labels/doc.go | 19 + .../k8s.io/apimachinery/pkg/labels/labels.go | 181 + .../apimachinery/pkg/labels/selector.go | 891 ++ .../pkg/labels/zz_generated.deepcopy.go | 42 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 332 + .../apimachinery/pkg/runtime/codec_check.go | 48 + .../apimachinery/pkg/runtime/conversion.go | 113 + .../apimachinery/pkg/runtime/converter.go | 805 ++ vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 51 + .../apimachinery/pkg/runtime/embedded.go | 142 + .../k8s.io/apimachinery/pkg/runtime/error.go | 122 + .../apimachinery/pkg/runtime/extension.go | 51 + .../apimachinery/pkg/runtime/generated.pb.go | 753 ++ .../apimachinery/pkg/runtime/generated.proto | 127 + .../k8s.io/apimachinery/pkg/runtime/helper.go | 212 + .../apimachinery/pkg/runtime/interfaces.go | 252 + .../apimachinery/pkg/runtime/register.go | 61 + .../pkg/runtime/schema/generated.pb.go | 63 + .../pkg/runtime/schema/generated.proto | 26 + .../pkg/runtime/schema/group_version.go | 300 + .../pkg/runtime/schema/interfaces.go | 40 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 754 ++ .../pkg/runtime/scheme_builder.go | 48 + .../pkg/runtime/swagger_doc_generator.go | 262 + .../k8s.io/apimachinery/pkg/runtime/types.go | 137 + .../apimachinery/pkg/runtime/types_proto.go | 69 + .../pkg/runtime/zz_generated.deepcopy.go | 108 + .../apimachinery/pkg/selection/operator.go | 33 + vendor/k8s.io/apimachinery/pkg/types/doc.go | 18 + .../apimachinery/pkg/types/namespacedname.go | 43 + .../k8s.io/apimachinery/pkg/types/nodename.go | 43 + vendor/k8s.io/apimachinery/pkg/types/patch.go | 28 + vendor/k8s.io/apimachinery/pkg/types/uid.go | 22 + .../apimachinery/pkg/util/errors/doc.go | 18 + .../apimachinery/pkg/util/errors/errors.go | 201 + .../pkg/util/intstr/generated.pb.go | 362 + .../pkg/util/intstr/generated.proto | 43 + .../apimachinery/pkg/util/intstr/intstr.go | 184 + .../k8s.io/apimachinery/pkg/util/json/json.go | 119 + .../pkg/util/naming/from_stack.go | 93 + .../k8s.io/apimachinery/pkg/util/net/http.go | 445 + .../apimachinery/pkg/util/net/interface.go | 416 + .../apimachinery/pkg/util/net/port_range.go | 149 + .../apimachinery/pkg/util/net/port_split.go | 77 + .../k8s.io/apimachinery/pkg/util/net/util.go | 56 + .../apimachinery/pkg/util/runtime/runtime.go | 173 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 203 + .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 + .../apimachinery/pkg/util/sets/empty.go | 23 + .../k8s.io/apimachinery/pkg/util/sets/int.go | 203 + .../apimachinery/pkg/util/sets/int64.go | 203 + .../apimachinery/pkg/util/sets/string.go | 203 + .../pkg/util/validation/field/errors.go | 259 + .../pkg/util/validation/field/path.go | 91 + .../pkg/util/validation/validation.go | 416 + 
vendor/k8s.io/apimachinery/pkg/watch/doc.go | 19 + .../k8s.io/apimachinery/pkg/watch/filter.go | 105 + vendor/k8s.io/apimachinery/pkg/watch/mux.go | 260 + .../apimachinery/pkg/watch/streamwatcher.go | 119 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 317 + .../pkg/watch/zz_generated.deepcopy.go | 40 + .../forked/golang/reflect/deep_equal.go | 388 + vendor/k8s.io/client-go/LICENSE | 202 + .../pkg/apis/clientauthentication/OWNERS | 7 + .../pkg/apis/clientauthentication/doc.go | 20 + .../pkg/apis/clientauthentication/register.go | 50 + .../pkg/apis/clientauthentication/types.go | 77 + .../apis/clientauthentication/v1alpha1/doc.go | 24 + .../clientauthentication/v1alpha1/register.go | 55 + .../clientauthentication/v1alpha1/types.go | 78 + .../v1alpha1/zz_generated.conversion.go | 176 + .../v1alpha1/zz_generated.deepcopy.go | 128 + .../v1alpha1/zz_generated.defaults.go | 32 + .../zz_generated.deepcopy.go | 128 + vendor/k8s.io/klog/.travis.yml | 14 + vendor/k8s.io/klog/CONTRIBUTING.md | 31 + vendor/k8s.io/klog/LICENSE | 191 + vendor/k8s.io/klog/OWNERS | 11 + vendor/k8s.io/klog/README.md | 51 + vendor/k8s.io/klog/RELEASE.md | 9 + vendor/k8s.io/klog/SECURITY_CONTACTS | 20 + vendor/k8s.io/klog/klog.go | 1239 +++ vendor/k8s.io/klog/klog_file.go | 126 + vendor/modules.txt | 39 +- 186 files changed, 51483 insertions(+), 236 deletions(-) delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/costandusagereportserviceiface/interface.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go create mode 100644 vendor/github.com/google/gofuzz/.travis.yml create mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md create mode 100644 vendor/github.com/google/gofuzz/LICENSE create mode 100644 vendor/github.com/google/gofuzz/README.md create mode 100644 vendor/github.com/google/gofuzz/doc.go create mode 100644 vendor/github.com/google/gofuzz/fuzz.go create mode 100644 vendor/github.com/kubernetes-sigs/aws-iam-authenticator/LICENSE create mode 100644 vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/arn/arn.go create mode 100644 vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token/token.go create mode 100644 vendor/gopkg.in/inf.v0/LICENSE create mode 100644 vendor/gopkg.in/inf.v0/dec.go create mode 100644 vendor/gopkg.in/inf.v0/rounder.go create mode 100644 vendor/k8s.io/apimachinery/LICENSE create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/amount.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/math.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/helper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/requirements.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/selector.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/selector.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/embedded.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/extension.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/helper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/selection/operator.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/namespacedname.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/nodename.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/uid.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/doc.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/errors/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/json.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/http.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/interface.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_range.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_split.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/string.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/filter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/mux.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/watch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go create mode 100644 vendor/k8s.io/client-go/LICENSE create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/klog/.travis.yml create mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md create mode 100644 vendor/k8s.io/klog/LICENSE create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/README.md create mode 100644 vendor/k8s.io/klog/RELEASE.md create mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/klog/klog.go create mode 100644 vendor/k8s.io/klog/klog_file.go diff --git 
a/aws/data_source_aws_eks_cluster_auth.go b/aws/data_source_aws_eks_cluster_auth.go index 7fda0c8ffbef..4ace7b051d5d 100644 --- a/aws/data_source_aws_eks_cluster_auth.go +++ b/aws/data_source_aws_eks_cluster_auth.go @@ -17,7 +17,6 @@ func dataSourceAwsEksClusterAuth() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.NoZeroValues, }, diff --git a/aws/data_source_aws_eks_cluster_auth_test.go b/aws/data_source_aws_eks_cluster_auth_test.go index b0ef514f2ee0..8fae1d4d8db2 100644 --- a/aws/data_source_aws_eks_cluster_auth_test.go +++ b/aws/data_source_aws_eks_cluster_auth_test.go @@ -12,13 +12,15 @@ import ( func TestAccAWSEksClusterAuthDataSource_basic(t *testing.T) { dataSourceResourceName := "data.aws_eks_cluster_auth.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { Config: testAccCheckAwsEksClusterAuthConfig_basic, Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceResourceName, "name", "foobar"), + resource.TestCheckResourceAttrSet(dataSourceResourceName, "token"), testAccCheckAwsEksClusterAuthToken(dataSourceResourceName), ), }, @@ -37,16 +39,7 @@ func testAccCheckAwsEksClusterAuthToken(n string) resource.TestCheckFunc { return fmt.Errorf("EKS Cluster Auth resource ID not set") } - name := rs.Primary.Attributes["name"] - if expected := "foobar"; name != expected { - return fmt.Errorf("Incorrect EKS cluster name: expected %q, got %q", expected, name) - } - tok := rs.Primary.Attributes["token"] - if tok == "" { - return fmt.Errorf("Token expected to not be nil") - } - verifier := token.NewVerifier(name) identity, err := verifier.Verify(tok) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 6b5795a9b30b..97486c32b4cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1113,27 +1113,6 @@ var awsPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -3167,13 +3146,6 @@ var awscnPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -3637,13 +3609,6 @@ var awsusgovPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 19edef38fd2f..000c839b7e30 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = 
"1.16.25" +const SDKVersion = "1.16.26" diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go index 2f8d260bd01c..a897e65b884a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -4043,6 +4043,13 @@ func (c *CodeCommit) PutFileRequest(input *PutFileInput) (req *request.Request, // can be added using PutFile is 6 MB. For files larger than 6 MB but smaller // than 2 GB, add them using a Git client. // +// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" +// The specified file is in a folder that exceeds the folder content size limit. +// Either save the file in a folder that has less content, or remove files or +// subfolders from the folder so it does not exceed the size limit. For more +// information about limits in AWS CodeCommit, see AWS CodeCommit User Guide +// (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). +// // * ErrCodePathRequiredException "PathRequiredException" // The folderPath for a location cannot be null. // @@ -4115,6 +4122,11 @@ func (c *CodeCommit) PutFileRequest(input *PutFileInput) (req *request.Request, // provide a different name for the file, or specify a different path for the // file. // +// * ErrCodeFilePathConflictsWithSubmodulePathException "FilePathConflictsWithSubmodulePathException" +// The specified file path or folder has the same path as a submodule in this +// repository. Either provide a different name for the file, or save the file +// in a directory that does not conflict with the submodule path. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutFile func (c *CodeCommit) PutFile(input *PutFileInput) (*PutFileOutput, error) { req, out := c.PutFileRequest(input) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go index 2301b41f556d..f0db5077f5a4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go @@ -220,6 +220,14 @@ const ( // file name. ErrCodeFileNameConflictsWithDirectoryNameException = "FileNameConflictsWithDirectoryNameException" + // ErrCodeFilePathConflictsWithSubmodulePathException for service response error code + // "FilePathConflictsWithSubmodulePathException". + // + // The specified file path or folder has the same path as a submodule in this + // repository. Either provide a different name for the file, or save the file + // in a directory that does not conflict with the submodule path. + ErrCodeFilePathConflictsWithSubmodulePathException = "FilePathConflictsWithSubmodulePathException" + // ErrCodeFileTooLargeException for service response error code // "FileTooLargeException". // @@ -228,6 +236,16 @@ const ( // (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). ErrCodeFileTooLargeException = "FileTooLargeException" + // ErrCodeFolderContentSizeLimitExceededException for service response error code + // "FolderContentSizeLimitExceededException". + // + // The specified file is in a folder that exceeds the folder content size limit. + // Either save the file in a folder that has less content, or remove files or + // subfolders from the folder so it does not exceed the size limit. 
For more + // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide + // (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). + ErrCodeFolderContentSizeLimitExceededException = "FolderContentSizeLimitExceededException" + // ErrCodeFolderDoesNotExistException for service response error code // "FolderDoesNotExistException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/costandusagereportserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/costandusagereportserviceiface/interface.go deleted file mode 100644 index 6c552c5fbff8..000000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/costandusagereportserviceiface/interface.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package costandusagereportserviceiface provides an interface to enable mocking the AWS Cost and Usage Report Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package costandusagereportserviceiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/costandusagereportservice" -) - -// CostandUsageReportServiceAPI provides an interface to enable mocking the -// costandusagereportservice.CostandUsageReportService service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Cost and Usage Report Service. -// func myFunc(svc costandusagereportserviceiface.CostandUsageReportServiceAPI) bool { -// // Make svc.DeleteReportDefinition request -// } -// -// func main() { -// sess := session.New() -// svc := costandusagereportservice.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockCostandUsageReportServiceClient struct { -// costandusagereportserviceiface.CostandUsageReportServiceAPI -// } -// func (m *mockCostandUsageReportServiceClient) DeleteReportDefinition(input *costandusagereportservice.DeleteReportDefinitionInput) (*costandusagereportservice.DeleteReportDefinitionOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockCostandUsageReportServiceClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. 
-type CostandUsageReportServiceAPI interface { - DeleteReportDefinition(*costandusagereportservice.DeleteReportDefinitionInput) (*costandusagereportservice.DeleteReportDefinitionOutput, error) - DeleteReportDefinitionWithContext(aws.Context, *costandusagereportservice.DeleteReportDefinitionInput, ...request.Option) (*costandusagereportservice.DeleteReportDefinitionOutput, error) - DeleteReportDefinitionRequest(*costandusagereportservice.DeleteReportDefinitionInput) (*request.Request, *costandusagereportservice.DeleteReportDefinitionOutput) - - DescribeReportDefinitions(*costandusagereportservice.DescribeReportDefinitionsInput) (*costandusagereportservice.DescribeReportDefinitionsOutput, error) - DescribeReportDefinitionsWithContext(aws.Context, *costandusagereportservice.DescribeReportDefinitionsInput, ...request.Option) (*costandusagereportservice.DescribeReportDefinitionsOutput, error) - DescribeReportDefinitionsRequest(*costandusagereportservice.DescribeReportDefinitionsInput) (*request.Request, *costandusagereportservice.DescribeReportDefinitionsOutput) - - DescribeReportDefinitionsPages(*costandusagereportservice.DescribeReportDefinitionsInput, func(*costandusagereportservice.DescribeReportDefinitionsOutput, bool) bool) error - DescribeReportDefinitionsPagesWithContext(aws.Context, *costandusagereportservice.DescribeReportDefinitionsInput, func(*costandusagereportservice.DescribeReportDefinitionsOutput, bool) bool, ...request.Option) error - - PutReportDefinition(*costandusagereportservice.PutReportDefinitionInput) (*costandusagereportservice.PutReportDefinitionOutput, error) - PutReportDefinitionWithContext(aws.Context, *costandusagereportservice.PutReportDefinitionInput, ...request.Option) (*costandusagereportservice.PutReportDefinitionOutput, error) - PutReportDefinitionRequest(*costandusagereportservice.PutReportDefinitionInput) (*request.Request, *costandusagereportservice.PutReportDefinitionOutput) -} - -var _ CostandUsageReportServiceAPI = (*costandusagereportservice.CostandUsageReportService)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go index 5f65456c3905..3183204612fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go @@ -6855,6 +6855,16 @@ type CreateDevicePoolInput struct { // The device pool's description. Description *string `locationName:"description" type:"string"` + // The number of devices that Device Farm can add to your device pool. Device + // Farm adds devices that are available and that meet the criteria that you + // assign for the rules parameter. Depending on how many devices meet these + // constraints, your device pool might contain fewer devices than the value + // for this parameter. + // + // By specifying the maximum number of devices, you can control the costs that + // you incur by running tests. + MaxDevices *int64 `locationName:"maxDevices" type:"integer"` + // The device pool's name. // // Name is a required field @@ -6909,6 +6919,12 @@ func (s *CreateDevicePoolInput) SetDescription(v string) *CreateDevicePoolInput return s } +// SetMaxDevices sets the MaxDevices field's value. +func (s *CreateDevicePoolInput) SetMaxDevices(v int64) *CreateDevicePoolInput { + s.MaxDevices = &v + return s +} + // SetName sets the Name field's value. 
func (s *CreateDevicePoolInput) SetName(v string) *CreateDevicePoolInput { s.Name = &v @@ -8565,49 +8581,65 @@ type DeviceFilter struct { // The aspect of a device such as platform or model used as the selection criteria // in a device filter. // - // Allowed values include: + // The supported operators for each attribute are provided in the following + // list. // - // * ARN: The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". + // ARNThe Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". // - // * PLATFORM: The device platform. Valid values are "ANDROID" or "IOS". + // Supported operators: EQUALS, IN, NOT_IN // - // * OS_VERSION: The operating system version. For example, "10.3.2". + // PLATFORMThe device platform. Valid values are "ANDROID" or "IOS". // - // * MODEL: The device model. For example, "iPad 5th Gen". + // Supported operators: EQUALS // - // * AVAILABILITY: The current availability of the device. Valid values are - // "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". + // OS_VERSIONThe operating system version. For example, "10.3.2". // - // * FORM_FACTOR: The device form factor. Valid values are "PHONE" or "TABLET". + // Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, + // LESS_THAN_OR_EQUALS, NOT_IN // - // * MANUFACTURER: The device manufacturer. For example, "Apple". + // MODELThe device model. For example, "iPad 5th Gen". // - // * REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. - // Valid values are "TRUE" or "FALSE". + // Supported operators: CONTAINS, EQUALS, IN, NOT_IN // - // * REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. - // Valid values are "TRUE" or "FALSE". + // AVAILABILITYThe current availability of the device. Valid values are "AVAILABLE", + // "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". // - // * INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance. + // Supported operators: EQUALS // - // * INSTANCE_LABELS: The label of the device instance. + // FORM_FACTORThe device form factor. Valid values are "PHONE" or "TABLET". // - // * FLEET_TYPE: The fleet type. Valid values are "PUBLIC" or "PRIVATE". - Attribute *string `locationName:"attribute" type:"string" enum:"DeviceFilterAttribute"` - - // The filter operator. + // Supported operators: EQUALS // - // * The EQUALS operator is available for every attribute except INSTANCE_LABELS. + // MANUFACTURERThe device manufacturer. For example, "Apple". // - // * The CONTAINS operator is available for the INSTANCE_LABELS and MODEL - // attributes. + // Supported operators: EQUALS, IN, NOT_IN // - // * The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, - // MANUFACTURER, and INSTANCE_ARN attributes. + // REMOTE_ACCESS_ENABLEDWhether the device is enabled for remote access. Valid + // values are "TRUE" or "FALSE". // - // * The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS - // operators are also available for the OS_VERSION attribute. - Operator *string `locationName:"operator" type:"string" enum:"DeviceFilterOperator"` + // Supported operators: EQUALS + // + // REMOTE_DEBUG_ENABLEDWhether the device is enabled for remote debugging. Valid + // values are "TRUE" or "FALSE". + // + // Supported operators: EQUALS + // + // INSTANCE_ARNThe Amazon Resource Name (ARN) of the device instance. 
+ // + // Supported operators: EQUALS, IN, NOT_IN + // + // INSTANCE_LABELSThe label of the device instance. + // + // Supported operators: CONTAINS + // + // FLEET_TYPEThe fleet type. Valid values are "PUBLIC" or "PRIVATE". + // + // Supported operators: EQUALS + Attribute *string `locationName:"attribute" type:"string" enum:"DeviceFilterAttribute"` + + // Specifies how Device Farm compares the filter's attribute to the value. For + // the operators that are supported by each attribute, see the attribute descriptions. + Operator *string `locationName:"operator" type:"string" enum:"RuleOperator"` // An array of one or more filter values used in a device filter. // @@ -8784,6 +8816,16 @@ type DevicePool struct { // The device pool's description. Description *string `locationName:"description" type:"string"` + // The number of devices that Device Farm can add to your device pool. Device + // Farm adds devices that are available and that meet the criteria that you + // assign for the rules parameter. Depending on how many devices meet these + // constraints, your device pool might contain fewer devices than the value + // for this parameter. + // + // By specifying the maximum number of devices, you can control the costs that + // you incur by running tests. + MaxDevices *int64 `locationName:"maxDevices" type:"integer"` + // The device pool's name. Name *string `locationName:"name" type:"string"` @@ -8823,6 +8865,12 @@ func (s *DevicePool) SetDescription(v string) *DevicePool { return s } +// SetMaxDevices sets the MaxDevices field's value. +func (s *DevicePool) SetMaxDevices(v int64) *DevicePool { + s.MaxDevices = &v + return s +} + // SetName sets the Name field's value. func (s *DevicePool) SetName(v string) *DevicePool { s.Name = &v @@ -13310,7 +13358,7 @@ type RemoteAccessSession struct { // The billing method of the remote access session. Possible values include // METERED or UNMETERED. For more information about metered devices, see AWS - // Device Farm terminology (http://docs.aws.amazon.com/devicefarm/latest/developerguide/welcome.html#welcome-terminology)." + // Device Farm terminology (https://docs.aws.amazon.com/devicefarm/latest/developerguide/welcome.html#welcome-terminology)." BillingMethod *string `locationName:"billingMethod" type:"string" enum:"BillingMethod"` // Unique identifier of your client for the remote access session. Only returned @@ -13667,61 +13715,77 @@ func (s *Resolution) SetWidth(v int64) *Resolution { return s } -// Represents a condition for a device pool. It is passed in as the rules parameter -// to CreateDevicePool and UpdateDevicePool. +// Represents a condition for a device pool. type Rule struct { _ struct{} `type:"structure"` - // The rule's attribute. It is the aspect of a device such as platform or model - // used as selection criteria to create or update a device pool. + // The rule's stringified attribute. For example, specify the value as "\"abc\"". // - // Allowed values include: + // The supported operators for each attribute are provided in the following + // list. // - // * ARN: The Amazon Resource Name (ARN) of a device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". + // APPIUM_VERSIONThe Appium version for the test. // - // * PLATFORM: The device platform. Valid values are "ANDROID" or "IOS". + // Supported operators: CONTAINS // - // * FORM_FACTOR: The device form factor. Valid values are "PHONE" or "TABLET". + // ARNThe Amazon Resource Name (ARN) of the device. 
For example, "arn:aws:devicefarm:us-west-2::device:12345Example". // - // * MANUFACTURER: The device manufacturer. For example, "Apple". + // Supported operators: EQUALS, IN, NOT_IN // - // * REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. - // Valid values are "TRUE" or "FALSE". + // AVAILABILITYThe current availability of the device. Valid values are "AVAILABLE", + // "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". // - // * REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. - // Valid values are "TRUE" or "FALSE". + // Supported operators: EQUALS // - // * APPIUM_VERSION: The Appium version for the test. + // FLEET_TYPEThe fleet type. Valid values are "PUBLIC" or "PRIVATE". // - // * INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance. + // Supported operators: EQUALS // - // * INSTANCE_LABELS: The label of the device instance. + // FORM_FACTORThe device form factor. Valid values are "PHONE" or "TABLET". // - // * FLEET_TYPE: The fleet type. Valid values are "PUBLIC" or "PRIVATE". - Attribute *string `locationName:"attribute" type:"string" enum:"DeviceAttribute"` - - // The rule's operator. + // Supported operators: EQUALS, IN, NOT_IN // - // * EQUALS: The equals operator. + // INSTANCE_ARNThe Amazon Resource Name (ARN) of the device instance. // - // * GREATER_THAN: The greater-than operator. + // Supported operators: IN, NOT_IN // - // * IN: The in operator. + // INSTANCE_LABELSThe label of the device instance. // - // * LESS_THAN: The less-than operator. + // Supported operators: CONTAINS // - // * NOT_IN: The not-in operator. + // MANUFACTURERThe device manufacturer. For example, "Apple". // - // * CONTAINS: The contains operator. - Operator *string `locationName:"operator" type:"string" enum:"RuleOperator"` - - // The rule's value. + // Supported operators: EQUALS, IN, NOT_IN // - // The value must be passed in as a string using escaped quotes. + // MODELThe device model, such as "Apple iPad Air 2" or "Google Pixel". // - // For example: + // Supported operators: CONTAINS, EQUALS, IN, NOT_IN // - // "value": "\"ANDROID\"" + // OS_VERSIONThe operating system version. For example, "10.3.2". + // + // Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, + // LESS_THAN_OR_EQUALS, NOT_IN + // + // PLATFORMThe device platform. Valid values are "ANDROID" or "IOS". + // + // Supported operators: EQUALS, IN, NOT_IN + // + // REMOTE_ACCESS_ENABLEDWhether the device is enabled for remote access. Valid + // values are "TRUE" or "FALSE". + // + // Supported operators: EQUALS + // + // REMOTE_DEBUG_ENABLEDWhether the device is enabled for remote debugging. Valid + // values are "TRUE" or "FALSE". + // + // Supported operators: EQUALS + Attribute *string `locationName:"attribute" type:"string" enum:"DeviceAttribute"` + + // Specifies how Device Farm compares the rule's attribute to the value. For + // the operators that are supported by each attribute, see the attribute descriptions. + Operator *string `locationName:"operator" type:"string" enum:"RuleOperator"` + + // The rule's value. Value *string `locationName:"value" type:"string"` } @@ -14358,8 +14422,6 @@ type ScheduleRunInput struct { Configuration *ScheduleRunConfiguration `locationName:"configuration" type:"structure"` // The ARN of the device pool for the run to be scheduled. - // - // Either devicePoolArn or deviceSelectionConfiguration is required in a request. 
DevicePoolArn *string `locationName:"devicePoolArn" min:"32" type:"string"` // The filter criteria used to dynamically select a set of devices for a test @@ -14508,8 +14570,9 @@ func (s *ScheduleRunOutput) SetRun(v *Run) *ScheduleRunOutput { return s } -// Represents test settings. This data structure is passed in as the test parameter -// to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun. +// Represents test settings. This data structure is passed in as the "test" +// parameter to ScheduleRun. For an example of the JSON request syntax, see +// ScheduleRun. type ScheduleRunTest struct { _ struct{} `type:"structure"` @@ -15449,9 +15512,32 @@ type UpdateDevicePoolInput struct { // Arn is a required field Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + // Sets whether the maxDevices parameter applies to your device pool. If you + // set this parameter to true, the maxDevices parameter does not apply, and + // Device Farm does not limit the number of devices that it adds to your device + // pool. In this case, Device Farm adds all available devices that meet the + // criteria that are specified for the rules parameter. + // + // If you use this parameter in your request, you cannot use the maxDevices + // parameter in the same request. + ClearMaxDevices *bool `locationName:"clearMaxDevices" type:"boolean"` + // A description of the device pool you wish to update. Description *string `locationName:"description" type:"string"` + // The number of devices that Device Farm can add to your device pool. Device + // Farm adds devices that are available and that meet the criteria that you + // assign for the rules parameter. Depending on how many devices meet these + // constraints, your device pool might contain fewer devices than the value + // for this parameter. + // + // By specifying the maximum number of devices, you can control the costs that + // you incur by running tests. + // + // If you use this parameter in your request, you cannot use the clearMaxDevices + // parameter in the same request. + MaxDevices *int64 `locationName:"maxDevices" type:"integer"` + // A string representing the name of the device pool you wish to update. Name *string `locationName:"name" type:"string"` @@ -15493,12 +15579,24 @@ func (s *UpdateDevicePoolInput) SetArn(v string) *UpdateDevicePoolInput { return s } +// SetClearMaxDevices sets the ClearMaxDevices field's value. +func (s *UpdateDevicePoolInput) SetClearMaxDevices(v bool) *UpdateDevicePoolInput { + s.ClearMaxDevices = &v + return s +} + // SetDescription sets the Description field's value. func (s *UpdateDevicePoolInput) SetDescription(v string) *UpdateDevicePoolInput { s.Description = &v return s } +// SetMaxDevices sets the MaxDevices field's value. +func (s *UpdateDevicePoolInput) SetMaxDevices(v int64) *UpdateDevicePoolInput { + s.MaxDevices = &v + return s +} + // SetName sets the Name field's value. 
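As an aside from the vendored diff (not part of the patch itself): the new maxDevices / clearMaxDevices support surfaced in the Device Farm update above could be exercised from caller code roughly as follows. This is a hedged sketch — the client setup, the pool ARN, and the Rules field are assumptions based on the existing Device Farm API, and the generated docs above state that clearMaxDevices and maxDevices are mutually exclusive.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/devicefarm"
)

func main() {
	// Device Farm is only available in us-west-2.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	client := devicefarm.New(sess)

	input := &devicefarm.UpdateDevicePoolInput{
		// Placeholder ARN; substitute a real device pool ARN.
		Arn:        aws.String("arn:aws:devicefarm:us-west-2:123456789012:devicepool:EXAMPLE"),
		MaxDevices: aws.Int64(5), // cap the pool at five matching devices
		Rules: []*devicefarm.Rule{{
			Attribute: aws.String(devicefarm.DeviceAttributeOsVersion),
			Operator:  aws.String(devicefarm.RuleOperatorGreaterThanOrEquals),
			Value:     aws.String("\"10.3.2\""), // rule values are passed as escaped, stringified values
		}},
		// To lift a previously configured cap instead, send ClearMaxDevices and
		// omit MaxDevices; the two fields are mutually exclusive.
		// ClearMaxDevices: aws.Bool(true),
	}

	if _, err := client.UpdateDevicePool(input); err != nil {
		log.Fatal(err)
	}
}
```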
func (s *UpdateDevicePoolInput) SetName(v string) *UpdateDevicePoolInput { s.Name = &v @@ -16502,6 +16600,15 @@ const ( // DeviceAttributeFleetType is a DeviceAttribute enum value DeviceAttributeFleetType = "FLEET_TYPE" + + // DeviceAttributeOsVersion is a DeviceAttribute enum value + DeviceAttributeOsVersion = "OS_VERSION" + + // DeviceAttributeModel is a DeviceAttribute enum value + DeviceAttributeModel = "MODEL" + + // DeviceAttributeAvailability is a DeviceAttribute enum value + DeviceAttributeAvailability = "AVAILABILITY" ) const ( @@ -16556,32 +16663,6 @@ const ( DeviceFilterAttributeFleetType = "FLEET_TYPE" ) -const ( - // DeviceFilterOperatorEquals is a DeviceFilterOperator enum value - DeviceFilterOperatorEquals = "EQUALS" - - // DeviceFilterOperatorLessThan is a DeviceFilterOperator enum value - DeviceFilterOperatorLessThan = "LESS_THAN" - - // DeviceFilterOperatorLessThanOrEquals is a DeviceFilterOperator enum value - DeviceFilterOperatorLessThanOrEquals = "LESS_THAN_OR_EQUALS" - - // DeviceFilterOperatorGreaterThan is a DeviceFilterOperator enum value - DeviceFilterOperatorGreaterThan = "GREATER_THAN" - - // DeviceFilterOperatorGreaterThanOrEquals is a DeviceFilterOperator enum value - DeviceFilterOperatorGreaterThanOrEquals = "GREATER_THAN_OR_EQUALS" - - // DeviceFilterOperatorIn is a DeviceFilterOperator enum value - DeviceFilterOperatorIn = "IN" - - // DeviceFilterOperatorNotIn is a DeviceFilterOperator enum value - DeviceFilterOperatorNotIn = "NOT_IN" - - // DeviceFilterOperatorContains is a DeviceFilterOperator enum value - DeviceFilterOperatorContains = "CONTAINS" -) - const ( // DeviceFormFactorPhone is a DeviceFormFactor enum value DeviceFormFactorPhone = "PHONE" @@ -16727,9 +16808,15 @@ const ( // RuleOperatorLessThan is a RuleOperator enum value RuleOperatorLessThan = "LESS_THAN" + // RuleOperatorLessThanOrEquals is a RuleOperator enum value + RuleOperatorLessThanOrEquals = "LESS_THAN_OR_EQUALS" + // RuleOperatorGreaterThan is a RuleOperator enum value RuleOperatorGreaterThan = "GREATER_THAN" + // RuleOperatorGreaterThanOrEquals is a RuleOperator enum value + RuleOperatorGreaterThanOrEquals = "GREATER_THAN_OR_EQUALS" + // RuleOperatorIn is a RuleOperator enum value RuleOperatorIn = "IN" diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go index bf26d3ccdb70..9f5c8a306b70 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go @@ -8,6 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" ) const opAddFlowOutputs = "AddFlowOutputs" @@ -803,6 +805,97 @@ func (c *MediaConnect) ListFlowsPagesWithContext(ctx aws.Context, input *ListFlo return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListTagsForResource +func (c *MediaConnect) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS MediaConnect. +// +// Lists all tags associated with the resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListTagsForResource +func (c *MediaConnect) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opRemoveFlowOutput = "RemoveFlowOutput" // RemoveFlowOutputRequest generates a "aws/request.Request" representing the @@ -1232,6 +1325,191 @@ func (c *MediaConnect) StopFlowWithContext(ctx aws.Context, input *StopFlowInput return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/TagResource +func (c *MediaConnect) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS MediaConnect. +// +// Associates the specified tags to a resource. If the request does not mention +// an existing tag associated with the resource, that tag is not changed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/TagResource +func (c *MediaConnect) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UntagResource +func (c *MediaConnect) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS MediaConnect. +// +// Deletes the specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UntagResource +func (c *MediaConnect) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateFlowEntitlement = "UpdateFlowEntitlement" // UpdateFlowEntitlementRequest generates a "aws/request.Request" representing the @@ -2653,6 +2931,70 @@ func (s *ListFlowsOutput) SetNextToken(v string) *ListFlowsOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +// AWS Elemental MediaConnect listed the tags associated with the resource. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A map from tag keys to values. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // An entitlement that has been granted to you from other AWS accounts. type ListedEntitlement struct { _ struct{} `type:"structure"` @@ -3456,6 +3798,76 @@ func (s *StopFlowOutput) SetStatus(v string) *StopFlowOutput { return s } +// The tags to add to the resource. 
Tag keys can have a maximum character length +// of 128 characters, and tag values can have a maximum length of 256 characters. +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // A map from tag keys to values. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Attributes related to the transport stream that are used in a source or output. type Transport struct { _ struct{} `type:"structure"` @@ -3519,6 +3931,71 @@ func (s *Transport) SetStreamId(v string) *Transport { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
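Another illustrative aside, outside the vendored update: the new MediaConnect tagging operations added above could be used roughly like this. The flow ARN and tag values are placeholders; the input and output shapes are taken directly from the structs in this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := mediaconnect.New(sess)

	// Placeholder flow ARN; substitute a real one.
	flowArn := "arn:aws:mediaconnect:us-east-1:123456789012:flow:1-23aBC45dEF67hiJ8:ExampleFlow"

	// Add or update tags on the flow.
	if _, err := client.TagResource(&mediaconnect.TagResourceInput{
		ResourceArn: aws.String(flowArn),
		Tags:        map[string]*string{"Environment": aws.String("test")},
	}); err != nil {
		log.Fatal(err)
	}

	// Read the tags back.
	out, err := client.ListTagsForResource(&mediaconnect.ListTagsForResourceInput{
		ResourceArn: aws.String(flowArn),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValueMap(out.Tags))

	// Remove a tag by key.
	if _, err := client.UntagResource(&mediaconnect.UntagResourceInput{
		ResourceArn: aws.String(flowArn),
		TagKeys:     []*string{aws.String("Environment")},
	}); err != nil {
		log.Fatal(err)
	}
}
```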
+func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // Information about the encryption of the flow. type UpdateEncryption struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go index 0963a017c349..5abd1b2c4a61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go @@ -2845,9 +2845,7 @@ func (s *ArchiveContainerSettings) SetM2tsSettings(v *M2tsSettings) *ArchiveCont type ArchiveGroupSettings struct { _ struct{} `type:"structure"` - // A directory and base filename where archive files should be written. If the - // base filename portion of the URI is left blank, the base filename of the - // first input will be automatically inserted. + // A directory and base filename where archive files should be written. // // Destination is a required field Destination *OutputLocationRef `locationName:"destination" type:"structure" required:"true"` @@ -7657,6 +7655,119 @@ func (s *FollowModeScheduleActionStartSettings) SetReferenceActionName(v string) return s } +// Frame Capture Group Settings +type FrameCaptureGroupSettings struct { + _ struct{} `type:"structure"` + + // The destination for the frame capture files. Either the URI for an Amazon + // S3 bucket and object, plus a file name prefix (for example, s3ssl://sportsDelivery/highlights/20180820/curling_) + // or the URI for a MediaStore container, plus a file name prefix (for example, + // mediastoressl://sportsDelivery/20180820/curling_). The final file names consist + // of the prefix from the destination field (for example, "curling_") + name + // modifier + the counter (5 digits, starting from 00001) + extension (which + // is always .jpg). For example, curlingLow.00001.jpg + // + // Destination is a required field + Destination *OutputLocationRef `locationName:"destination" type:"structure" required:"true"` +} + +// String returns the string representation +func (s FrameCaptureGroupSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FrameCaptureGroupSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FrameCaptureGroupSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FrameCaptureGroupSettings"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *FrameCaptureGroupSettings) SetDestination(v *OutputLocationRef) *FrameCaptureGroupSettings { + s.Destination = v + return s +} + +// Frame Capture Output Settings +type FrameCaptureOutputSettings struct { + _ struct{} `type:"structure"` + + // Required if the output group contains more than one output. 
This modifier + // forms part of the output file name. + NameModifier *string `locationName:"nameModifier" type:"string"` +} + +// String returns the string representation +func (s FrameCaptureOutputSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FrameCaptureOutputSettings) GoString() string { + return s.String() +} + +// SetNameModifier sets the NameModifier field's value. +func (s *FrameCaptureOutputSettings) SetNameModifier(v string) *FrameCaptureOutputSettings { + s.NameModifier = &v + return s +} + +// Frame Capture Settings +type FrameCaptureSettings struct { + _ struct{} `type:"structure"` + + // The frequency, in seconds, for capturing frames for inclusion in the output. + // For example, "10" means capture a frame every 10 seconds. + // + // CaptureInterval is a required field + CaptureInterval *int64 `locationName:"captureInterval" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s FrameCaptureSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FrameCaptureSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FrameCaptureSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FrameCaptureSettings"} + if s.CaptureInterval == nil { + invalidParams.Add(request.NewErrParamRequired("CaptureInterval")) + } + if s.CaptureInterval != nil && *s.CaptureInterval < 1 { + invalidParams.Add(request.NewErrParamMinValue("CaptureInterval", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCaptureInterval sets the CaptureInterval field's value. +func (s *FrameCaptureSettings) SetCaptureInterval(v int64) *FrameCaptureSettings { + s.CaptureInterval = &v + return s +} + type GlobalConfiguration struct { _ struct{} `type:"structure"` @@ -7791,11 +7902,11 @@ type H264Settings struct { FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"` // Framerate denominator. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 // fps. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` // If enabled, use reference B frames for GOP structures that have B frames // > 1. @@ -7929,6 +8040,12 @@ func (s *H264Settings) Validate() error { if s.Bitrate != nil && *s.Bitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000)) } + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) } @@ -8414,6 +8531,9 @@ type HlsGroupSettings struct { // Parameters that control interactions with the CDN. HlsCdnSettings *HlsCdnSettings `locationName:"hlsCdnSettings" type:"structure"` + // If enabled, writes out I-Frame only playlists in addition to media playlists. 
+ IFrameOnlyPlaylists *string `locationName:"iFrameOnlyPlaylists" type:"string" enum:"IFrameOnlyPlaylistType"` + // If mode is "live", the number of segments to retain in the manifest (.m3u8) // file. This number must be less than or equal to keepSegments. If mode is // "vod", this parameter has no effect. @@ -8645,6 +8765,12 @@ func (s *HlsGroupSettings) SetHlsCdnSettings(v *HlsCdnSettings) *HlsGroupSetting return s } +// SetIFrameOnlyPlaylists sets the IFrameOnlyPlaylists field's value. +func (s *HlsGroupSettings) SetIFrameOnlyPlaylists(v string) *HlsGroupSettings { + s.IFrameOnlyPlaylists = &v + return s +} + // SetIndexNSegments sets the IndexNSegments field's value. func (s *HlsGroupSettings) SetIndexNSegments(v int64) *HlsGroupSettings { s.IndexNSegments = &v @@ -11943,6 +12069,9 @@ type OutputGroupSettings struct { ArchiveGroupSettings *ArchiveGroupSettings `locationName:"archiveGroupSettings" type:"structure"` + // Frame Capture Group Settings + FrameCaptureGroupSettings *FrameCaptureGroupSettings `locationName:"frameCaptureGroupSettings" type:"structure"` + HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"` MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"` @@ -11970,6 +12099,11 @@ func (s *OutputGroupSettings) Validate() error { invalidParams.AddNested("ArchiveGroupSettings", err.(request.ErrInvalidParams)) } } + if s.FrameCaptureGroupSettings != nil { + if err := s.FrameCaptureGroupSettings.Validate(); err != nil { + invalidParams.AddNested("FrameCaptureGroupSettings", err.(request.ErrInvalidParams)) + } + } if s.HlsGroupSettings != nil { if err := s.HlsGroupSettings.Validate(); err != nil { invalidParams.AddNested("HlsGroupSettings", err.(request.ErrInvalidParams)) @@ -11998,6 +12132,12 @@ func (s *OutputGroupSettings) SetArchiveGroupSettings(v *ArchiveGroupSettings) * return s } +// SetFrameCaptureGroupSettings sets the FrameCaptureGroupSettings field's value. +func (s *OutputGroupSettings) SetFrameCaptureGroupSettings(v *FrameCaptureGroupSettings) *OutputGroupSettings { + s.FrameCaptureGroupSettings = v + return s +} + // SetHlsGroupSettings sets the HlsGroupSettings field's value. func (s *OutputGroupSettings) SetHlsGroupSettings(v *HlsGroupSettings) *OutputGroupSettings { s.HlsGroupSettings = v @@ -12050,6 +12190,9 @@ type OutputSettings struct { ArchiveOutputSettings *ArchiveOutputSettings `locationName:"archiveOutputSettings" type:"structure"` + // Frame Capture Output Settings + FrameCaptureOutputSettings *FrameCaptureOutputSettings `locationName:"frameCaptureOutputSettings" type:"structure"` + HlsOutputSettings *HlsOutputSettings `locationName:"hlsOutputSettings" type:"structure"` MsSmoothOutputSettings *MsSmoothOutputSettings `locationName:"msSmoothOutputSettings" type:"structure"` @@ -12105,6 +12248,12 @@ func (s *OutputSettings) SetArchiveOutputSettings(v *ArchiveOutputSettings) *Out return s } +// SetFrameCaptureOutputSettings sets the FrameCaptureOutputSettings field's value. +func (s *OutputSettings) SetFrameCaptureOutputSettings(v *FrameCaptureOutputSettings) *OutputSettings { + s.FrameCaptureOutputSettings = v + return s +} + // SetHlsOutputSettings sets the HlsOutputSettings field's value. 
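One more hedged sketch, separate from the patch: wiring together the new MediaLive frame-capture shapes introduced in this hunk. The DestinationRefId field on OutputLocationRef is assumed from the existing medialive package; the frame-capture structs, fields, and Validate methods come from the diff above.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	// Output group that writes JPEG frame captures. The destination ref ID is a
	// placeholder that must match a destination defined elsewhere on the channel.
	groupSettings := &medialive.OutputGroupSettings{
		FrameCaptureGroupSettings: &medialive.FrameCaptureGroupSettings{
			Destination: &medialive.OutputLocationRef{
				DestinationRefId: aws.String("frame-capture-destination"),
			},
		},
	}

	// Per-output settings: nameModifier is required when the group has more than one output.
	outputSettings := &medialive.OutputSettings{
		FrameCaptureOutputSettings: &medialive.FrameCaptureOutputSettings{
			NameModifier: aws.String("_lowres"),
		},
	}

	// Codec settings: capture one frame every 10 seconds.
	codecSettings := &medialive.VideoCodecSettings{
		FrameCaptureSettings: &medialive.FrameCaptureSettings{
			CaptureInterval: aws.Int64(10),
		},
	}

	if err := groupSettings.Validate(); err != nil {
		fmt.Println("invalid group settings:", err)
	}
	if err := codecSettings.Validate(); err != nil {
		fmt.Println("invalid codec settings:", err)
	}
	fmt.Println(outputSettings)
}
```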
func (s *OutputSettings) SetHlsOutputSettings(v *HlsOutputSettings) *OutputSettings { s.HlsOutputSettings = v @@ -14935,6 +15084,9 @@ func (s *ValidationError) SetErrorMessage(v string) *ValidationError { type VideoCodecSettings struct { _ struct{} `type:"structure"` + // Frame Capture Settings + FrameCaptureSettings *FrameCaptureSettings `locationName:"frameCaptureSettings" type:"structure"` + H264Settings *H264Settings `locationName:"h264Settings" type:"structure"` } @@ -14951,6 +15103,11 @@ func (s VideoCodecSettings) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *VideoCodecSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "VideoCodecSettings"} + if s.FrameCaptureSettings != nil { + if err := s.FrameCaptureSettings.Validate(); err != nil { + invalidParams.AddNested("FrameCaptureSettings", err.(request.ErrInvalidParams)) + } + } if s.H264Settings != nil { if err := s.H264Settings.Validate(); err != nil { invalidParams.AddNested("H264Settings", err.(request.ErrInvalidParams)) @@ -14963,6 +15120,12 @@ func (s *VideoCodecSettings) Validate() error { return nil } +// SetFrameCaptureSettings sets the FrameCaptureSettings field's value. +func (s *VideoCodecSettings) SetFrameCaptureSettings(v *FrameCaptureSettings) *VideoCodecSettings { + s.FrameCaptureSettings = v + return s +} + // SetH264Settings sets the H264Settings field's value. func (s *VideoCodecSettings) SetH264Settings(v *H264Settings) *VideoCodecSettings { s.H264Settings = v @@ -14976,8 +15139,10 @@ type VideoDescription struct { // Video codec settings. CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"` - // Output video height (in pixels). Leave blank to use source video height. - // If left blank, width must also be unspecified. + // Output video height, in pixels. Must be an even number. For most codecs, + // you can leave this field and width blank in order to use the height and width + // (resolution) from the source. Note, however, that leaving blank is not recommended. + // For the Frame Capture codec, height and width are required. Height *int64 `locationName:"height" type:"integer"` // The name of this VideoDescription. Outputs will use this name to uniquely @@ -14987,24 +15152,30 @@ type VideoDescription struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // Indicates how to respond to the AFD values in the input stream. Setting to - // "respond" causes input video to be clipped, depending on AFD value, input - // display aspect ratio and output display aspect ratio. + // Indicates how to respond to the AFD values in the input stream. RESPOND causes + // input video to be clipped, depending on the AFD value, input display aspect + // ratio, and output display aspect ratio, and (except for FRAMECAPTURE codec) + // includes the values in the output. PASSTHROUGH (does not apply to FRAMECAPTURE + // codec) ignores the AFD values and includes the values in the output, so input + // video is not clipped. NONE ignores the AFD values and does not include the + // values through to the output, so input video is not clipped. RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"VideoDescriptionRespondToAfd"` - // When set to "stretchToOutput", automatically configures the output position - // to stretch the video to the specified output resolution. This option will - // override any position value. 
+ // STRETCHTOOUTPUT configures the output position to stretch the video to the + // specified output resolution (height and width). This option will override + // any position value. DEFAULT may insert black boxes (pillar boxes or letter + // boxes) around the video to provide the specified output resolution. ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"VideoDescriptionScalingBehavior"` - // Changes the width of the anti-alias filter kernel used for scaling. Only - // applies if scaling is being performed and antiAlias is set to true. 0 is - // the softest setting, 100 the sharpest, and 50 recommended for most content. + // Changes the strength of the anti-alias filter used for scaling. 0 is the + // softest setting, 100 is the sharpest. A setting of 50 is recommended for + // most content. Sharpness *int64 `locationName:"sharpness" type:"integer"` - // Output video width (in pixels). Leave out to use source video width. If left - // out, height must also be left out. Display aspect ratio is always preserved - // by letterboxing or pillarboxing when necessary. + // Output video width, in pixels. Must be an even number. For most codecs, you + // can leave this field and height blank in order to use the height and width + // (resolution) from the source. Note, however, that leaving blank is not recommended. + // For the Frame Capture codec, height and width are required. Width *int64 `locationName:"width" type:"integer"` } @@ -16325,6 +16496,17 @@ const ( HlsWebdavHttpTransferModeNonChunked = "NON_CHUNKED" ) +// When set to "standard", an I-Frame only playlist will be written out for +// each video output in the output group. This I-Frame only playlist will contain +// byte range offsets pointing to the I-frame(s) in each segment. +const ( + // IFrameOnlyPlaylistTypeDisabled is a IFrameOnlyPlaylistType enum value + IFrameOnlyPlaylistTypeDisabled = "DISABLED" + + // IFrameOnlyPlaylistTypeStandard is a IFrameOnlyPlaylistType enum value + IFrameOnlyPlaylistTypeStandard = "STANDARD" +) + // codec in increasing order of complexity const ( // InputCodecMpeg2 is a InputCodec enum value diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 000000000000..3d97fc7a29f8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. 
+ +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 000000000000..1b4f6c208a14 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000000..f57de90da8ac --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000000..00d65f327732 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000000..a26b046d94f1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. 
+type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. 
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 000000000000..24552483c6ce --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000000..d9aa3c42d666 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,428 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. 
+// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. 
+func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 000000000000..fe1bd7d904e2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. 
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000000..93464c91cffb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. 
+func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000000..e748e1730e1c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000000..3abfed2cff04 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. 
+func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000000..0f5fb173e9fd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000000..d4db5a1c1457 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. 
+ - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. 
+ n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. 
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000000..44ebd457cf61 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,604 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. 
+type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. 
+type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. 
+func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. 
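As a reading aid for the extension accessors above (`HasExtension`, `GetExtension`, `ClearExtension`, and `SetExtension` further below), here is a minimal usage sketch. It is not part of the vendored file; the generated package `examplepb`, its message `Container`, and the descriptor `E_Nickname` are hypothetical stand-ins for code produced by `protoc-gen-gogo` for a proto2 message that declares an extension range.

```go
package main

import (
	"fmt"
	"log"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/examplepb" // hypothetical generated code, not part of this change
)

func main() {
	msg := &pb.Container{} // proto2 message declaring an extension range

	// Store a value for the extension field; the descriptor carries the
	// field number and Go type, so the value must be a *string here.
	if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("fred")); err != nil {
		log.Fatal(err)
	}

	// Presence check does not decode the payload.
	fmt.Println(proto.HasExtension(msg, pb.E_Nickname)) // true

	// GetExtension decodes (or returns the already-decoded value).
	v, err := proto.GetExtension(msg, pb.E_Nickname)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // "fred"

	// After clearing, GetExtension reports ErrMissingExtension unless the
	// extension declares a default value.
	proto.ClearExtension(msg, pb.E_Nickname)
	fmt.Println(proto.HasExtension(msg, pb.E_Nickname)) // false
}
```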
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) 
+ return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000000..53ebd8cca01c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,368 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return 
EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000000..b2271d0b7b86 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,987 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. 
+type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
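The scalar helpers (`Bool`, `Int32`, `String`, and so on) and the reusable `Buffer` shown above are the pieces most callers touch directly; the sketch below shows them together. It is illustrative only and assumes a hypothetical generated proto2 message `Item` with optional `name` and `count` fields; that generated code is not part of this change.

```go
package main

import (
	"fmt"
	"log"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/examplepb" // hypothetical generated code, not part of this change
)

func main() {
	// Optional proto2 fields are pointers, so the helpers allocate them.
	item := &pb.Item{
		Name:  proto.String("widget"),
		Count: proto.Int32(3),
	}

	// A Buffer can be reused across messages; SetDeterministic(true) makes
	// map entries serialize in sorted key order for this binary.
	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true)
	if err := buf.Marshal(item); err != nil {
		log.Fatal(err)
	}

	// Round-trip through the wire format.
	out := &pb.Item{}
	if err := proto.Unmarshal(buf.Bytes(), out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName(), out.GetCount()) // widget 3
}
```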
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
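`SetDefaults` and the `defaultMessage` cache above are easiest to see from the caller's side. A minimal sketch, again assuming a hypothetical generated proto2 message `Config` whose `retries` field declares `[default = 3]`; the generated package is not part of this change:

```go
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/examplepb" // hypothetical generated code, not part of this change
)

func main() {
	cfg := &pb.Config{}

	// Unset fields with a declared default are allocated and filled in;
	// fields that are already set are left alone.
	proto.SetDefaults(cfg)

	fmt.Println(*cfg.Retries) // 3, from the proto-declared default
}
```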
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000000..b3aa39190a13 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000000..3b6ca41d5e55 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,314 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "sync" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
+func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000000..b6cad90834b3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000000..7ffd3c29d90c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000000..d55a335d9453 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000000..aca8eed02a11 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000000..04dcb8d9ef26 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,608 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000000..40ea3dd935c2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000000..5a5fd93f7c19 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 000000000000..ba58c49a43f9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3006 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
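The zigzag sizers above rely on the mapping `(v << 1) ^ (v >> 31)` (or `>> 63` for the 64-bit case), which interleaves negative and non-negative values so that small magnitudes — positive or negative — encode to short varints. A self-contained sketch of that mapping and its inverse, with illustrative names only and assuming the same arithmetic-shift semantics the code above uses:

```go
package main

import "fmt"

// zigzag32 maps signed values to unsigned ones so that values near zero
// stay small: 0->0, -1->1, 1->2, -2->3, and so on.
func zigzag32(v int32) uint32 {
	return uint32(v<<1) ^ uint32(v>>31) // arithmetic shift copies the sign bit
}

// unzigzag32 inverts the mapping.
func unzigzag32(u uint32) int32 {
	return int32(u>>1) ^ -int32(u&1)
}

func main() {
	for _, v := range []int32{0, -1, 1, -2, 2, -64} {
		u := zigzag32(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, u, unzigzag32(u))
	}
}
```

This is why `sint32`/`sint64` fields use the zigzag sizers and marshalers, while plain `int32`/`int64` fields fall through to the ordinary varint variants, where a negative value always costs ten bytes.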
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 000000000000..997f57c1e102 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 000000000000..f520106e09f5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000000..e6b15c76cabe --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when initialized
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	bytesExtensions field                         // offset of XXX_extensions with type []byte
+	extensionRanges []ExtensionRange              // if non-nil, the extension ranges extensions may be in
+	isMessageSet    bool                          // if true, implies extensions field is [][]byte with message set wire format
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unread bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information for unmarshaling a single field of a message.
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if fn.IsValid() && len(oneofFields) > 0 {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
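Every field unmarshaler that follows obeys the same contract: verify the wire type, decode exactly one value from the front of `b` into the field behind `f`, and return the unconsumed tail (repeated numeric fields also accept the packed, length-delimited form). A rough, standalone sketch of that contract for a varint-encoded int64 field, using simplified stand-ins rather than the package's internal `pointer` machinery:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint is a compact stand-in for the unrolled decoder in this file:
// it returns the decoded value and the number of bytes consumed (0 on error).
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// unmarshalInt64 mirrors the shape of the field unmarshalers: check the wire
// type, decode one value, store it through the destination, return the rest.
func unmarshalInt64(b []byte, dst *int64, wire int) ([]byte, error) {
	const wireVarint = 0
	if wire != wireVarint {
		return b, errors.New("bad wire type")
	}
	x, n := decodeVarint(b)
	if n == 0 {
		return nil, errors.New("unexpected EOF")
	}
	*dst = int64(x)
	return b[n:], nil
}

func main() {
	var v int64
	rest, err := unmarshalInt64([]byte{0x96, 0x01, 0x08}, &v, 0)
	fmt.Println(v, rest, err) // 150 [8] <nil>
}
```

The real unmarshalers below differ mainly in where they store the result (value, pointer, or appended slice element) and in which wire encoding they expect.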
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 000000000000..00d6c7ad9376 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. 
Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 {
+ return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) +
slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000000..0407ba85d01c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. 
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
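+	// For example, a top-level message marshals roughly as `name:"foo"` rather
+	// than the nested form `<name:"foo">` (the field name here is illustrative).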
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000000..1d6c6aa0e41b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000000..1ce0be2fa9be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
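+//
+// As a rough illustration (the field names below are hypothetical), the input
+// accepted here looks like:
+//
+//	name: "foo"
+//	nested: <
+//	  id: 1
+//	>
+//	values: [1, 2, 3]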
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
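+		// For example, the input `"foo" "bar"` yields the single string "foobar",
+		// mirroring C-style string literal concatenation.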
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
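+				// For example: [type.googleapis.com/my.pkg.MyMessage] < ... >
+				// (the type URL here is hypothetical).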
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
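+// For example, `foo: 1; bar: 2`, `foo: 1, bar: 2`, and `foo: 1 bar: 2` all
+// parse identically (the field names here are illustrative).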
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+	if len(props.CustomType) > 0 {
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				tc := reflect.TypeOf(new(Marshaler))
+				ok := t.Elem().Implements(tc.Elem())
+				if ok {
+					fv := v
+					flen := fv.Len()
+					if flen == fv.Cap() {
+						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+						reflect.Copy(nav, fv)
+						fv.Set(nav)
+					}
+					fv.SetLen(flen + 1)
+
+					// Read one.
+					p.back()
+					return p.readAny(fv.Index(flen), props)
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.ValueOf(custom))
+		} else {
+			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+		}
+		return nil
+	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
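+//
+// A minimal usage sketch (the generated message type and field are hypothetical):
+//
+//	var msg examplepb.Thing
+//	if err := UnmarshalText(`name: "foo"`, &msg); err != nil {
+//		// handle parse error
+//	}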
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000000..9324f6542bcf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
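+// For instance, a Timestamp derived from a time.Time in the year 10000 or later
+// is rejected here.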
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 000000000000..38439fa99013
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 000000000000..b175d1b64234 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 000000000000..c1cf7bf85e9a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 000000000000..ceadde6a5e10 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git 
a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
new file mode 100644
index 000000000000..f8684d99fc4e
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - 1.4
+  - 1.3
+  - 1.2
+  - tip
+
+install:
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+
+script:
+  - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 000000000000..51cf5cd1adae
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+  * If you are an individual writing original source code and you're sure you
+    own the intellectual property, then you'll need to sign an [individual
+    CLA][].
+
+  * If you work for a company that wants to allow you to contribute your work,
+    then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+  1. It's generally best to start by opening a new issue describing the bug or
+     feature you're intending to fix. Even if you think it's relatively minor,
+     it's helpful to know what people are working on. Mention in the initial
+     issue that you are planning to work on that bug or feature so that it can
+     be assigned to you.
+
+  1. Follow the normal process of [forking][] the project, and set up a new
+     branch to work in. It's important that each group of changes be done in
+     separate branches in order to ensure that a pull request only includes the
+     commits related to that bug or feature.
+
+  1. Go makes it very simple to ensure properly formatted code, so always run
+     `go fmt` on your code before committing it. You should also run
+     [golint][] over your code. As noted in the [golint readme][], it's not
+     strictly necessary that your code be completely "lint-free", but this will
+     help you find common style issues.
+
+  1. Any significant changes should almost always be accompanied by tests. The
+     project already has good test coverage, so look at some of the existing
+     tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+     are invaluable tools for seeing which parts of your code aren't being
+     exercised by your tests.
+
+  1. Do your best to have [well-formed commit messages][] for each change.
+     This provides consistency throughout the project, and ensures that commit
+     messages are able to be formatted properly by various git tools.
+
+  1. Finally, push the commits to your fork and submit a [pull request][].
+ +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md new file mode 100644 index 000000000000..64869af347a5 --- /dev/null +++ b/vendor/github.com/google/gofuzz/README.md @@ -0,0 +1,71 @@ +gofuzz +====== + +gofuzz is a library for populating go objects with random values. + +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) +[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) + +This is useful for testing: + +* Do your project's objects really serialize/unserialize correctly in all cases? +* Is there an incorrectly formatted object that will cause your project to panic? + +Import with ```import "github.com/google/gofuzz"``` + +You can use it on single variables: +```go +f := fuzz.New() +var myInt int +f.Fuzz(&myInt) // myInt gets a random value. +``` + +You can use it on maps: +```go +f := fuzz.New().NilChance(0).NumElements(1, 1) +var myMap map[ComplexKeyType]string +f.Fuzz(&myMap) // myMap will have exactly one element. +``` + +Customize the chance of getting a nil pointer: +```go +f := fuzz.New().NilChance(.5) +var fancyStruct struct { + A, B, C, D *string +} +f.Fuzz(&fancyStruct) // About half the pointers should be set. +``` + +You can even customize the randomization completely if needed: +```go +type MyEnum string +const ( + A MyEnum = "A" + B MyEnum = "B" +) +type MyInfo struct { + Type MyEnum + AInfo *string + BInfo *string +} + +f := fuzz.New().NilChance(0).Funcs( + func(e *MyInfo, c fuzz.Continue) { + switch c.Intn(2) { + case 0: + e.Type = A + c.Fuzz(&e.AInfo) + case 1: + e.Type = B + c.Fuzz(&e.BInfo) + } + }, +) + +var myObject MyInfo +f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +``` + +See more examples in ```example_test.go```. + +Happy testing! diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go new file mode 100644 index 000000000000..9f9956d4a64f --- /dev/null +++ b/vendor/github.com/google/gofuzz/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fuzz is a library for populating go objects with random values. +package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go new file mode 100644 index 000000000000..1dfa80a6fca9 --- /dev/null +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -0,0 +1,487 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzz + +import ( + "fmt" + "math/rand" + "reflect" + "time" +) + +// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. +type fuzzFuncMap map[reflect.Type]reflect.Value + +// Fuzzer knows how to fill any object with random fields. +type Fuzzer struct { + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int +} + +// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. +func New() *Fuzzer { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Fuzzer { + f := &Fuzzer{ + defaultFuzzFuncs: fuzzFuncMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), + }, + + fuzzFuncs: fuzzFuncMap{}, + r: rand.New(rand.NewSource(seed)), + nilChance: .2, + minElements: 1, + maxElements: 10, + maxDepth: 100, + } + return f +} + +// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. +// +// Each entry in fuzzFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// function will fill with random data. The second parameter must be a +// fuzz.Continue, which will provide a source of randomness and a way +// to automatically continue fuzzing smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// fuzzing, the function `func(s *string, c fuzz.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChange option. For slices, it +// doesn't make much sense to pre-create them--Fuzzer doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions. 
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Need 2 in and 0 out params!") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type fuzz.Continue") + } + f.fuzzFuncs[argT] = v + } + return f +} + +// RandSource causes f to get values from the given source of randomness. +// Use if you want deterministic fuzzing. +func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Fuzzer) NilChance(p float64) *Fuzzer { + if p < 0 || p > 1 { + panic("p should be between 0 and 1, inclusive.") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { + if atLeast > atMost { + panic("atLeast must be <= atMost") + } + if atLeast < 0 { + panic("atLeast must be >= 0") + } + f.minElements = atLeast + f.maxElements = atMost + return f +} + +func (f *Fuzzer) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Fuzzer) genShouldFill() bool { + return f.r.Float64() > f.nilChance +} + +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + +// Fuzz recursively fills all of obj's fields with something random. First +// this tries to find a custom fuzz function (see Funcs). If there is no +// custom function this tests whether the object implements fuzz.Interface and, +// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if +// there is a default fuzz function provided by this package. If all of that +// fails, this will generate random values for all primitive fields and then +// recurse for all non-primitives. +// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. +func (f *Fuzzer) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, 0) +} + +// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +// Not safe for cyclic or tree-like structs! +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. 
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, flagNoCustomFuzz) +} + +const ( + // Do not try to find a custom fuzz function. Does not apply recursively. + flagNoCustomFuzz uint64 = 1 << iota +) + +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + return + } + + if flags&flagNoCustomFuzz == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.fuzzer.r) + return + } + switch v.Kind() { + case reflect.Map: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.fuzzer.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFuzz(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFuzz(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFuzz(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.fuzzer.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + fc.doFuzz(v.Field(i), 0) + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("Can't handle %#v", v.Interface())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fuzz function for it. + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] + if !ok { + // Second: see if it can fuzz itself. + if v.CanInterface() { + intf := v.Interface() + if fuzzable, ok := intf.(Interface); ok { + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) + return true + } + } + // Finally: see if there is a default fuzz function. + doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.fuzzer.r, + })}) + return true +} + +// Interface represents an object that knows how to fuzz itself. Any time we +// find a type that implements this interface we will delegate the act of +// fuzzing itself. 
+type Interface interface { + Fuzz(c Continue) +} + +// Continue can be passed to custom fuzzing functions to allow them to use +// the correct source of randomness and to continue fuzzing their members. +type Continue struct { + fc *fuzzerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your fuzzing + // to be repeatable for a given seed. + *rand.Rand +} + +// Fuzz continues fuzzing obj. obj must be a pointer. +func (c Continue) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, 0) +} + +// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +func (c Continue) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, flagNoCustomFuzz) +} + +// RandString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func (c Continue) RandString() string { + return randString(c.Rand) +} + +// RandUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) RandUint64() uint64 { + return randUint64(c.Rand) +} + +// RandBool returns true or false randomly. +func (c Continue) RandBool() bool { + return randBool(c.Rand) +} + +func fuzzInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fuzzUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func fuzzTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. + sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + c.Fuzz(&nsec) + *t = time.Unix(sec, nsec) +} + +var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ + reflect.Bool: func(v reflect.Value, r *rand.Rand) { + v.SetBool(randBool(r)) + }, + reflect.Int: fuzzInt, + reflect.Int8: fuzzInt, + reflect.Int16: fuzzInt, + reflect.Int32: fuzzInt, + reflect.Int64: fuzzInt, + reflect.Uint: fuzzUint, + reflect.Uint8: fuzzUint, + reflect.Uint16: fuzzUint, + reflect.Uint32: fuzzUint, + reflect.Uint64: fuzzUint, + reflect.Uintptr: fuzzUint, + reflect.Float32: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(float64(r.Float32())) + }, + reflect.Float64: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(r.Float64()) + }, + reflect.Complex64: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.Complex128: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.String: func(v reflect.Value, r *rand.Rand) { + v.SetString(randString(r)) + }, + reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, +} + +// randBool returns true or false randomly. +func randBool(r *rand.Rand) bool { + if r.Int()&1 == 1 { + return true + } + return false +} + +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. 
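// Illustrative sketch (editorial example, not part of the upstream gofuzz
// source): a type can take control of its own randomization by implementing
// Interface. MyID is a hypothetical example type; when Fuzzer.Fuzz reaches a
// value whose pointer implements Interface, tryCustom delegates to its Fuzz
// method instead of filling the fields reflectively.
type MyID struct {
	Prefix string
	Serial uint64
}

// Fuzz implements Interface, so the fuzzer delegates to it.
func (id *MyID) Fuzz(c Continue) {
	id.Prefix = "id-" + c.RandString() // Continue embeds *rand.Rand for further randomness
	id.Serial = c.RandUint64()
}

func exampleSelfFuzzing() {
	f := New()
	var id MyID
	f.Fuzz(&id) // invokes (*MyID).Fuzz via the Interface check in tryCustom
}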
+func (r *charRange) choose(rand *rand.Rand) rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {' ', '~'}, // ASCII characters + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +// randString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func randString(r *rand.Rand) string { + n := r.Intn(20) + runes := make([]rune, n) + for i := range runes { + runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) + } + return string(runes) +} + +// randUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func randUint64(r *rand.Rand) uint64 { + return uint64(r.Uint32())<<32 | uint64(r.Uint32()) +} diff --git a/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/LICENSE b/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/arn/arn.go b/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/arn/arn.go new file mode 100644 index 000000000000..35efede93f23 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/arn/arn.go @@ -0,0 +1,69 @@ +package arn + +import ( + "fmt" + "strings" + + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// Canonicalize validates IAM resources are appropriate for the authenticator +// and converts STS assumed roles into the IAM role resource. 
+// +// Supported IAM resources are: +// * AWS account: arn:aws:iam::123456789012:root +// * IAM user: arn:aws:iam::123456789012:user/Bob +// * IAM role: arn:aws:iam::123456789012:role/S3Access +// * IAM Assumed role: arn:aws:sts::123456789012:assumed-role/Accounting-Role/Mary (converted to IAM role) +// * Federated user: arn:aws:sts::123456789012:federated-user/Bob +func Canonicalize(arn string) (string, error) { + parsed, err := awsarn.Parse(arn) + if err != nil { + return "", fmt.Errorf("arn '%s' is invalid: '%v'", arn, err) + } + + if err := checkPartition(parsed.Partition); err != nil { + return "", fmt.Errorf("arn '%s' does not have a recognized partition", arn) + } + + parts := strings.Split(parsed.Resource, "/") + resource := parts[0] + + switch parsed.Service { + case "sts": + switch resource { + case "federated-user": + return arn, nil + case "assumed-role": + if len(parts) < 3 { + return "", fmt.Errorf("assumed-role arn '%s' does not have a role", arn) + } + // IAM ARNs can contain paths, part[0] is resource, parts[len(parts)] is the SessionName. + role := strings.Join(parts[1:len(parts)-1], "/") + return fmt.Sprintf("arn:%s:iam::%s:role/%s", parsed.Partition, parsed.AccountID, role), nil + default: + return "", fmt.Errorf("unrecognized resource %s for service sts", parsed.Resource) + } + case "iam": + switch resource { + case "role", "user", "root": + return arn, nil + default: + return "", fmt.Errorf("unrecognized resource %s for service iam", parsed.Resource) + } + } + + return "", fmt.Errorf("service %s in arn %s is not a valid service for identities", parsed.Service, arn) +} + +func checkPartition(partition string) error { + switch partition { + case endpoints.AwsPartitionID: + case endpoints.AwsCnPartitionID: + case endpoints.AwsUsGovPartitionID: + default: + return fmt.Errorf("partion %s is not recognized", partition) + } + return nil +} diff --git a/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token/token.go b/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token/token.go new file mode 100644 index 000000000000..14901f120ad3 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token/token.go @@ -0,0 +1,432 @@ +/* +Copyright 2017 by the contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package token + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/kubernetes-sigs/aws-iam-authenticator/pkg/arn" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientauthv1alpha1 "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" +) + +// Identity is returned on successful Verify() results. It contains a parsed +// version of the AWS identity used to create the token. 
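// Illustrative sketch (editorial example, not part of the upstream source):
// what arn.Canonicalize, vendored above, does with an STS assumed-role ARN.
// Verify below relies on this to map session identities back to the underlying
// IAM role. The ARN is the documentation example value from the comment above.
func exampleCanonicalize() {
	role, err := arn.Canonicalize("arn:aws:sts::123456789012:assumed-role/Accounting-Role/Mary")
	if err != nil {
		return // handle the error in real code
	}
	fmt.Println(role) // "arn:aws:iam::123456789012:role/Accounting-Role"
}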
+type Identity struct { + // ARN is the raw Amazon Resource Name returned by sts:GetCallerIdentity + ARN string + + // CanonicalARN is the Amazon Resource Name converted to a more canonical + // representation. In particular, STS assumed role ARNs like + // "arn:aws:sts::ACCOUNTID:assumed-role/ROLENAME/SESSIONNAME" are converted + // to their IAM ARN equivalent "arn:aws:iam::ACCOUNTID:role/NAME" + CanonicalARN string + + // AccountID is the 12 digit AWS account number. + AccountID string + + // UserID is the unique user/role ID (e.g., "AROAAAAAAAAAAAAAAAAAA"). + UserID string + + // SessionName is the STS session name (or "" if this is not a + // session-based identity). For EC2 instance roles, this will be the EC2 + // instance ID (e.g., "i-0123456789abcdef0"). You should only rely on it + // if you trust that _only_ EC2 is allowed to assume the IAM Role. If IAM + // users or other roles are allowed to assume the role, they can provide + // (nearly) arbitrary strings here. + SessionName string +} + +const ( + // The sts GetCallerIdentity request is valid for 15 minutes regardless of this parameters value after it has been + // signed, but we set this unused parameter to 60 for legacy reasons (we check for a value between 0 and 60 on the + // server side in 0.3.0 or earlier). IT IS IGNORED. If we can get STS to support x-amz-expires, then we should + // set this parameter to the actual expiration, and make it configurable. + requestPresignParam = 60 + // The actual token expiration (presigned STS urls are valid for 15 minutes after timestamp in x-amz-date). + presignedURLExpiration = 15 * time.Minute + v1Prefix = "k8s-aws-v1." + maxTokenLenBytes = 1024 * 4 + clusterIDHeader = "x-k8s-aws-id" + // Format of the X-Amz-Date header used for expiration + // https://golang.org/pkg/time/#pkg-constants + dateHeaderFormat = "20060102T150405Z" +) + +// Token is generated and used by Kubernetes client-go to authenticate with a Kubernetes cluster. +type Token struct { + Token string + Expiration time.Time +} + +// FormatError is returned when there is a problem with token that is +// an encoded sts request. This can include the url, data, action or anything +// else that prevents the sts call from being made. +type FormatError struct { + message string +} + +func (e FormatError) Error() string { + return "input token was not properly formatted: " + e.message +} + +// STSError is returned when there was either an error calling STS or a problem +// processing the data returned from STS. +type STSError struct { + message string +} + +func (e STSError) Error() string { + return "sts getCallerIdentity failed: " + e.message +} + +// NewSTSError creates a error of type STS. 
+func NewSTSError(m string) STSError { + return STSError{message: m} +} + +var parameterWhitelist = map[string]bool{ + "action": true, + "version": true, + "x-amz-algorithm": true, + "x-amz-credential": true, + "x-amz-date": true, + "x-amz-expires": true, + "x-amz-security-token": true, + "x-amz-signature": true, + "x-amz-signedheaders": true, +} + +// this is the result type from the GetCallerIdentity endpoint +type getCallerIdentityWrapper struct { + GetCallerIdentityResponse struct { + GetCallerIdentityResult struct { + Account string `json:"Account"` + Arn string `json:"Arn"` + UserID string `json:"UserId"` + } `json:"GetCallerIdentityResult"` + ResponseMetadata struct { + RequestID string `json:"RequestId"` + } `json:"ResponseMetadata"` + } `json:"GetCallerIdentityResponse"` +} + +// Generator provides new tokens for the heptio authenticator. +type Generator interface { + // Get a token using credentials in the default credentials chain. + Get(string) (Token, error) + // GetWithRole creates a token by assuming the provided role, using the credentials in the default chain. + GetWithRole(clusterID, roleARN string) (Token, error) + // GetWithRoleForSession creates a token by assuming the provided role, using the provided session. + GetWithRoleForSession(clusterID string, roleARN string, sess *session.Session) (Token, error) + // GetWithSTS returns a token valid for clusterID using the given STS client. + GetWithSTS(clusterID string, stsAPI *sts.STS) (Token, error) + // FormatJSON returns the client auth formatted json for the ExecCredential auth + FormatJSON(Token) string +} + +type generator struct { + forwardSessionName bool +} + +// NewGenerator creates a Generator and returns it. +func NewGenerator(forwardSessionName bool) (Generator, error) { + return generator{ + forwardSessionName: forwardSessionName, + }, nil +} + +// Get uses the directly available AWS credentials to return a token valid for +// clusterID. It follows the default AWS credential handling behavior. +func (g generator) Get(clusterID string) (Token, error) { + return g.GetWithRole(clusterID, "") +} + +func StdinStderrTokenProvider() (string, error) { + var v string + fmt.Fprint(os.Stderr, "Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + return v, err +} + +// GetWithRole assumes the given AWS IAM role and returns a token valid for +// clusterID. If roleARN is empty, behaves like Get (does not assume a role). +func (g generator) GetWithRole(clusterID string, roleARN string) (Token, error) { + // create a session with the "base" credentials available + // (from environment variable, profile files, EC2 metadata, etc) + sess, err := session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: StdinStderrTokenProvider, + SharedConfigState: session.SharedConfigEnable, + }) + if err != nil { + return Token{}, fmt.Errorf("could not create session: %v", err) + } + + return g.GetWithRoleForSession(clusterID, roleARN, sess) +} + +// GetWithRole assumes the given AWS IAM role for the given session and behaves +// like GetWithRole. +func (g generator) GetWithRoleForSession(clusterID string, roleARN string, sess *session.Session) (Token, error) { + // use an STS client based on the direct credentials + stsAPI := sts.New(sess) + + // if a roleARN was specified, replace the STS client with one that uses + // temporary credentials from that role. 
+ if roleARN != "" { + sessionSetter := func(provider *stscreds.AssumeRoleProvider) {} + if g.forwardSessionName { + // If the current session is already a federated identity, carry through + // this session name onto the new session to provide better debugging + // capabilities + resp, err := stsAPI.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + if err != nil { + return Token{}, err + } + + userIDParts := strings.Split(*resp.UserId, ":") + sessionSetter = func(provider *stscreds.AssumeRoleProvider) { + if len(userIDParts) == 2 { + provider.RoleSessionName = userIDParts[1] + } + } + } + + // create STS-based credentials that will assume the given role + creds := stscreds.NewCredentials(sess, roleARN, sessionSetter) + + // create an STS API interface that uses the assumed role's temporary credentials + stsAPI = sts.New(sess, &aws.Config{Credentials: creds}) + } + + return g.GetWithSTS(clusterID, stsAPI) +} + +// GetWithSTS returns a token valid for clusterID using the given STS client. +func (g generator) GetWithSTS(clusterID string, stsAPI *sts.STS) (Token, error) { + // generate an sts:GetCallerIdentity request and add our custom cluster ID header + request, _ := stsAPI.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) + request.HTTPRequest.Header.Add(clusterIDHeader, clusterID) + + // Sign the request. The expires parameter (sets the x-amz-expires header) is + // currently ignored by STS, and the token expires 15 minutes after the x-amz-date + // timestamp regardless. We set it to 60 seconds for backwards compatibility (the + // parameter is a required argument to Presign(), and authenticators 0.3.0 and older are expecting a value between + // 0 and 60 on the server side). + // https://github.com/aws/aws-sdk-go/issues/2167 + presignedURLString, err := request.Presign(requestPresignParam) + if err != nil { + return Token{}, err + } + + // Set token expiration to 1 minute before the presigned URL expires for some cushion + tokenExpiration := time.Now().Local().Add(presignedURLExpiration - 1*time.Minute) + // TODO: this may need to be a constant-time base64 encoding + return Token{v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString)), tokenExpiration}, nil +} + +// FormatJSON formats the json to support ExecCredential authentication +func (g generator) FormatJSON(token Token) string { + expirationTimestamp := metav1.NewTime(token.Expiration) + execInput := &clientauthv1alpha1.ExecCredential{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "client.authentication.k8s.io/v1alpha1", + Kind: "ExecCredential", + }, + Status: &clientauthv1alpha1.ExecCredentialStatus{ + ExpirationTimestamp: &expirationTimestamp, + Token: token.Token, + }, + } + enc, _ := json.Marshal(execInput) + return string(enc) +} + +// Verifier validates tokens by calling STS and returning the associated identity. +type Verifier interface { + Verify(token string) (*Identity, error) +} + +type tokenVerifier struct { + client *http.Client + clusterID string +} + +// NewVerifier creates a Verifier that is bound to the clusterID and uses the default http client. +func NewVerifier(clusterID string) Verifier { + return tokenVerifier{ + client: http.DefaultClient, + clusterID: clusterID, + } +} + +// Verify a token is valid for the specified clusterID. On success, returns an +// Identity that contains information about the AWS principal that created the +// token. On failure, returns nil and a non-nil error. 
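// Illustrative sketch (editorial example, not part of the upstream
// authenticator source): how the Generator and Verifier halves are meant to be
// used together. "my-cluster" is a placeholder cluster ID and error handling is
// abbreviated; this is the flow the aws_eks_cluster_auth data source and the
// cluster-side webhook build on.
func exampleTokenRoundTrip() error {
	gen, err := NewGenerator(false)
	if err != nil {
		return err
	}
	tok, err := gen.Get("my-cluster") // presigned GetCallerIdentity URL, base64, "k8s-aws-v1." prefix
	if err != nil {
		return err
	}
	fmt.Println(gen.FormatJSON(tok)) // ExecCredential JSON consumed by client-go

	// Cluster side: a Verifier bound to the same cluster ID checks the token
	// and returns the caller's AWS identity.
	id, err := NewVerifier("my-cluster").Verify(tok.Token)
	if err != nil {
		return err
	}
	fmt.Println(id.CanonicalARN) // e.g. arn:aws:iam::123456789012:role/S3Access
	return nil
}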
+func (v tokenVerifier) Verify(token string) (*Identity, error) { + if len(token) > maxTokenLenBytes { + return nil, FormatError{"token is too large"} + } + + if !strings.HasPrefix(token, v1Prefix) { + return nil, FormatError{fmt.Sprintf("token is missing expected %q prefix", v1Prefix)} + } + + // TODO: this may need to be a constant-time base64 decoding + tokenBytes, err := base64.RawURLEncoding.DecodeString(strings.TrimPrefix(token, v1Prefix)) + if err != nil { + return nil, FormatError{err.Error()} + } + + parsedURL, err := url.Parse(string(tokenBytes)) + if err != nil { + return nil, FormatError{err.Error()} + } + + if parsedURL.Scheme != "https" { + return nil, FormatError{fmt.Sprintf("unexpected scheme %q in pre-signed URL", parsedURL.Scheme)} + } + + if parsedURL.Host != "sts.amazonaws.com" { + return nil, FormatError{"unexpected hostname in pre-signed URL"} + } + + if parsedURL.Path != "/" { + return nil, FormatError{"unexpected path in pre-signed URL"} + } + + queryParamsLower := make(url.Values) + queryParams := parsedURL.Query() + for key, values := range queryParams { + if !parameterWhitelist[strings.ToLower(key)] { + return nil, FormatError{fmt.Sprintf("non-whitelisted query parameter %q", key)} + } + if len(values) != 1 { + return nil, FormatError{"query parameter with multiple values not supported"} + } + queryParamsLower.Set(strings.ToLower(key), values[0]) + } + + if queryParamsLower.Get("action") != "GetCallerIdentity" { + return nil, FormatError{"unexpected action parameter in pre-signed URL"} + } + + if !hasSignedClusterIDHeader(&queryParamsLower) { + return nil, FormatError{fmt.Sprintf("client did not sign the %s header in the pre-signed URL", clusterIDHeader)} + } + + // We validate x-amz-expires is between 0 and 15 minutes (900 seconds) although currently pre-signed STS URLs, and + // therefore tokens, expire exactly 15 minutes after the x-amz-date header, regardless of x-amz-expires. 
+ expires, err := strconv.Atoi(queryParamsLower.Get("x-amz-expires")) + if err != nil || expires < 0 || expires > 900 { + return nil, FormatError{fmt.Sprintf("invalid X-Amz-Expires parameter in pre-signed URL: %d", expires)} + } + + date := queryParamsLower.Get("x-amz-date") + if date == "" { + return nil, FormatError{"X-Amz-Date parameter must be present in pre-signed URL"} + } + + dateParam, err := time.Parse(dateHeaderFormat, date) + if err != nil { + return nil, FormatError{fmt.Sprintf("error parsing X-Amz-Date parameter %s into format %s: %s", date, dateHeaderFormat, err.Error())} + } + + now := time.Now() + expiration := dateParam.Add(presignedURLExpiration) + if now.After(expiration) { + return nil, FormatError{fmt.Sprintf("X-Amz-Date parameter is expired (%.f minute expiration) %s", presignedURLExpiration.Minutes(), dateParam)} + } + + req, err := http.NewRequest("GET", parsedURL.String(), nil) + req.Header.Set(clusterIDHeader, v.clusterID) + req.Header.Set("accept", "application/json") + + response, err := v.client.Do(req) + if err != nil { + // special case to avoid printing the full URL if possible + if urlErr, ok := err.(*url.Error); ok { + return nil, NewSTSError(fmt.Sprintf("error during GET: %v", urlErr.Err)) + } + return nil, NewSTSError(fmt.Sprintf("error during GET: %v", err)) + } + defer response.Body.Close() + + if response.StatusCode != 200 { + return nil, NewSTSError(fmt.Sprintf("error from AWS (expected 200, got %d)", response.StatusCode)) + } + + responseBody, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, NewSTSError(fmt.Sprintf("error reading HTTP result: %v", err)) + } + + var callerIdentity getCallerIdentityWrapper + err = json.Unmarshal(responseBody, &callerIdentity) + if err != nil { + return nil, NewSTSError(err.Error()) + } + + // parse the response into an Identity + id := &Identity{ + ARN: callerIdentity.GetCallerIdentityResponse.GetCallerIdentityResult.Arn, + AccountID: callerIdentity.GetCallerIdentityResponse.GetCallerIdentityResult.Account, + } + id.CanonicalARN, err = arn.Canonicalize(id.ARN) + if err != nil { + return nil, NewSTSError(err.Error()) + } + + // The user ID is either UserID:SessionName (for assumed roles) or just + // UserID (for IAM User principals). + userIDParts := strings.Split(callerIdentity.GetCallerIdentityResponse.GetCallerIdentityResult.UserID, ":") + if len(userIDParts) == 2 { + id.UserID = userIDParts[0] + id.SessionName = userIDParts[1] + } else if len(userIDParts) == 1 { + id.UserID = userIDParts[0] + } else { + return nil, STSError{fmt.Sprintf( + "malformed UserID %q", + callerIdentity.GetCallerIdentityResponse.GetCallerIdentityResult.UserID)} + } + + return id, nil +} + +func hasSignedClusterIDHeader(paramsLower *url.Values) bool { + signedHeaders := strings.Split(paramsLower.Get("x-amz-signedheaders"), ";") + for _, hdr := range signedHeaders { + if strings.ToLower(hdr) == strings.ToLower(clusterIDHeader) { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/inf.v0/LICENSE b/vendor/gopkg.in/inf.v0/LICENSE new file mode 100644 index 000000000000..87a5cede3392 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go new file mode 100644 index 000000000000..26548b63cef4 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/dec.go @@ -0,0 +1,615 @@ +// Package inf (type inf.Dec) implements "infinite-precision" decimal +// arithmetic. +// "Infinite precision" describes two characteristics: practically unlimited +// precision for decimal number representation and no support for calculating +// with any specific fixed precision. +// (Although there is no practical limit on precision, inf.Dec can only +// represent finite decimals.) +// +// This package is currently in experimental stage and the API may change. +// +// This package does NOT support: +// - rounding to specific precisions (as opposed to specific decimal positions) +// - the notion of context (each rounding must be explicit) +// - NaN and Inf values, and distinguishing between positive and negative zero +// - conversions to and from float32/64 types +// +// Features considered for possible addition: +// + formatting options +// + Exp method +// + combined operations such as AddRound/MulAdd etc +// + exchanging data in decimal32/64/128 formats +// +package inf // import "gopkg.in/inf.v0" + +// TODO: +// - avoid excessive deep copying (quo and rounders) + +import ( + "fmt" + "io" + "math/big" + "strings" +) + +// A Dec represents a signed arbitrary-precision decimal. +// It is a combination of a sign, an arbitrary-precision integer coefficient +// value, and a signed fixed-precision exponent value. +// The sign and the coefficient value are handled together as a signed value +// and referred to as the unscaled value. +// (Positive and negative zero values are not distinguished.) +// Since the exponent is most commonly non-positive, it is handled in negated +// form and referred to as scale. +// +// The mathematical value of a Dec equals: +// +// unscaled * 10**(-scale) +// +// Note that different Dec representations may have equal mathematical values. 
+// +// unscaled scale String() +// ------------------------- +// 0 0 "0" +// 0 2 "0.00" +// 0 -2 "0" +// 1 0 "1" +// 100 2 "1.00" +// 10 0 "10" +// 1 -1 "10" +// +// The zero value for a Dec represents the value 0 with scale 0. +// +// Operations are typically performed through the *Dec type. +// The semantics of the assignment operation "=" for "bare" Dec values is +// undefined and should not be relied on. +// +// Methods are typically of the form: +// +// func (z *Dec) Op(x, y *Dec) *Dec +// +// and implement operations z = x Op y with the result as receiver; if it +// is one of the operands it may be overwritten (and its memory reused). +// To enable chaining of operations, the result is also returned. Methods +// returning a result other than *Dec take one of the operands as the receiver. +// +// A "bare" Quo method (quotient / division operation) is not provided, as the +// result is not always a finite decimal and thus in general cannot be +// represented as a Dec. +// Instead, in the common case when rounding is (potentially) necessary, +// QuoRound should be used with a Scale and a Rounder. +// QuoExact or QuoRound with RoundExact can be used in the special cases when it +// is known that the result is always a finite decimal. +// +type Dec struct { + unscaled big.Int + scale Scale +} + +// Scale represents the type used for the scale of a Dec. +type Scale int32 + +const scaleSize = 4 // bytes in a Scale value + +// Scaler represents a method for obtaining the scale to use for the result of +// an operation on x and y. +type scaler interface { + Scale(x *Dec, y *Dec) Scale +} + +var bigInt = [...]*big.Int{ + big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), + big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), + big.NewInt(10), +} + +var exp10cache [64]big.Int = func() [64]big.Int { + e10, e10i := [64]big.Int{}, bigInt[1] + for i := range e10 { + e10[i].Set(e10i) + e10i = new(big.Int).Mul(e10i, bigInt[10]) + } + return e10 +}() + +// NewDec allocates and returns a new Dec set to the given int64 unscaled value +// and scale. +func NewDec(unscaled int64, scale Scale) *Dec { + return new(Dec).SetUnscaled(unscaled).SetScale(scale) +} + +// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled +// value and scale. +func NewDecBig(unscaled *big.Int, scale Scale) *Dec { + return new(Dec).SetUnscaledBig(unscaled).SetScale(scale) +} + +// Scale returns the scale of x. +func (x *Dec) Scale() Scale { + return x.scale +} + +// Unscaled returns the unscaled value of x for u and true for ok when the +// unscaled value can be represented as int64; otherwise it returns an undefined +// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid +// checking the validity of the value when the check is known to be redundant. +func (x *Dec) Unscaled() (u int64, ok bool) { + u = x.unscaled.Int64() + var i big.Int + ok = i.SetInt64(u).Cmp(&x.unscaled) == 0 + return +} + +// UnscaledBig returns the unscaled value of x as *big.Int. +func (x *Dec) UnscaledBig() *big.Int { + return &x.unscaled +} + +// SetScale sets the scale of z, with the unscaled value unchanged, and returns +// z. +// The mathematical value of the Dec changes as if it was multiplied by +// 10**(oldscale-scale). +func (z *Dec) SetScale(scale Scale) *Dec { + z.scale = scale + return z +} + +// SetUnscaled sets the unscaled value of z, with the scale unchanged, and +// returns z. 
+func (z *Dec) SetUnscaled(unscaled int64) *Dec { + z.unscaled.SetInt64(unscaled) + return z +} + +// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec { + z.unscaled.Set(unscaled) + return z +} + +// Set sets z to the value of x and returns z. +// It does nothing if z == x. +func (z *Dec) Set(x *Dec) *Dec { + if z != x { + z.SetUnscaledBig(x.UnscaledBig()) + z.SetScale(x.Scale()) + } + return z +} + +// Sign returns: +// +// -1 if x < 0 +// 0 if x == 0 +// +1 if x > 0 +// +func (x *Dec) Sign() int { + return x.UnscaledBig().Sign() +} + +// Neg sets z to -x and returns z. +func (z *Dec) Neg(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Neg(x.UnscaledBig()) + return z +} + +// Cmp compares x and y and returns: +// +// -1 if x < y +// 0 if x == y +// +1 if x > y +// +func (x *Dec) Cmp(y *Dec) int { + xx, yy := upscale(x, y) + return xx.UnscaledBig().Cmp(yy.UnscaledBig()) +} + +// Abs sets z to |x| (the absolute value of x) and returns z. +func (z *Dec) Abs(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Abs(x.UnscaledBig()) + return z +} + +// Add sets z to the sum x+y and returns z. +// The scale of z is the greater of the scales of x and y. +func (z *Dec) Add(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Sub sets z to the difference x-y and returns z. +// The scale of z is the greater of the scales of x and y. +func (z *Dec) Sub(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Mul sets z to the product x*y and returns z. +// The scale of z is the sum of the scales of x and y. +func (z *Dec) Mul(x, y *Dec) *Dec { + z.SetScale(x.Scale() + y.Scale()) + z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig()) + return z +} + +// Round sets z to the value of x rounded to Scale s using Rounder r, and +// returns z. +func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec { + return z.QuoRound(x, NewDec(1, 0), s, r) +} + +// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the +// specified scale. +// +// If the rounder is RoundExact but the result can not be expressed exactly at +// the specified scale, QuoRound returns nil, and the value of z is undefined. +// +// There is no corresponding Div method; the equivalent can be achieved through +// the choice of Rounder used. +// +func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec { + return z.quo(x, y, sclr{s}, r) +} + +func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec { + scl := s.Scale(x, y) + var zzz *Dec + if r.UseRemainder() { + zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int)) + zzz = r.Round(new(Dec), zz, rA, rB) + } else { + zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil) + zzz = r.Round(new(Dec), zz, nil, nil) + } + if zzz == nil { + return nil + } + return z.Set(zzz) +} + +// QuoExact sets z to the quotient x/y and returns z when x/y is a finite +// decimal. Otherwise it returns nil and the value of z is undefined. +// +// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is +// calculated so that the remainder will be zero whenever x/y is a finite +// decimal. 
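// Illustrative sketch (editorial example, not part of the upstream inf
// package): basic Dec arithmetic with the methods defined above. The literal
// values are arbitrary examples chosen so every operation has an exact result.
func exampleDecArithmetic() {
	x := NewDec(125, 2) // 1.25 (unscaled 125, scale 2)
	y := NewDec(8, 1)   // 0.8

	fmt.Println(new(Dec).Add(x, y))      // "2.05"  -- scale is the greater of the operands'
	fmt.Println(new(Dec).Mul(x, y))      // "1.000" -- scale is the sum of the operands'
	fmt.Println(new(Dec).QuoExact(x, y)) // "1.5625" -- nil if x/y were not a finite decimal

	if z, ok := new(Dec).SetString("12.375"); ok {
		fmt.Println(z.Scale(), z) // 3 12.375
	}
}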
+func (z *Dec) QuoExact(x, y *Dec) *Dec { + return z.quo(x, y, scaleQuoExact{}, RoundExact) +} + +// quoRem sets z to the quotient x/y with the scale s, and if useRem is true, +// it sets remNum and remDen to the numerator and denominator of the remainder. +// It returns z, remNum and remDen. +// +// The remainder is normalized to the range -1 < r < 1 to simplify rounding; +// that is, the results satisfy the following equation: +// +// x / y = z + (remNum/remDen) * 10**(-z.Scale()) +// +// See Rounder for more details about rounding. +// +func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool, + remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) { + // difference (required adjustment) compared to "canonical" result scale + shift := s - (x.Scale() - y.Scale()) + // pointers to adjusted unscaled dividend and divisor + var ix, iy *big.Int + switch { + case shift > 0: + // increased scale: decimal-shift dividend left + ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift)) + iy = y.UnscaledBig() + case shift < 0: + // decreased scale: decimal-shift divisor left + ix = x.UnscaledBig() + iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift)) + default: + ix = x.UnscaledBig() + iy = y.UnscaledBig() + } + // save a copy of iy in case it to be overwritten with the result + iy2 := iy + if iy == z.UnscaledBig() { + iy2 = new(big.Int).Set(iy) + } + // set scale + z.SetScale(s) + // set unscaled + if useRem { + // Int division + _, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int)) + // set remainder + remNum.Set(intr) + remDen.Set(iy2) + } else { + z.UnscaledBig().Quo(ix, iy) + } + return z, remNum, remDen +} + +type sclr struct{ s Scale } + +func (s sclr) Scale(x, y *Dec) Scale { + return s.s +} + +type scaleQuoExact struct{} + +func (sqe scaleQuoExact) Scale(x, y *Dec) Scale { + rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig()) + f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5]) + var f10 Scale + if f2 > f5 { + f10 = Scale(f2) + } else { + f10 = Scale(f5) + } + return x.Scale() - y.Scale() + f10 +} + +func factor(n *big.Int, p *big.Int) int { + // could be improved for large factors + d, f := n, 0 + for { + dd, dm := new(big.Int).DivMod(d, p, new(big.Int)) + if dm.Sign() == 0 { + f++ + d = dd + } else { + break + } + } + return f +} + +func factor2(n *big.Int) int { + // could be improved for large factors + f := 0 + for ; n.Bit(f) == 0; f++ { + } + return f +} + +func upscale(a, b *Dec) (*Dec, *Dec) { + if a.Scale() == b.Scale() { + return a, b + } + if a.Scale() > b.Scale() { + bb := b.rescale(a.Scale()) + return a, bb + } + aa := a.rescale(b.Scale()) + return aa, b +} + +func exp10(x Scale) *big.Int { + if int(x) < len(exp10cache) { + return &exp10cache[int(x)] + } + return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil) +} + +func (x *Dec) rescale(newScale Scale) *Dec { + shift := newScale - x.Scale() + switch { + case shift < 0: + e := exp10(-shift) + return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale) + case shift > 0: + e := exp10(shift) + return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale) + } + return x +} + +var zeros = []byte("00000000000000000000000000000000" + + "00000000000000000000000000000000") +var lzeros = Scale(len(zeros)) + +func appendZeros(s []byte, n Scale) []byte { + for i := Scale(0); i < n; i += lzeros { + if n > i+lzeros { + s = append(s, zeros...) + } else { + s = append(s, zeros[0:n-i]...) 
+ } + } + return s +} + +func (x *Dec) String() string { + if x == nil { + return "" + } + scale := x.Scale() + s := []byte(x.UnscaledBig().String()) + if scale <= 0 { + if scale != 0 && x.unscaled.Sign() != 0 { + s = appendZeros(s, -scale) + } + return string(s) + } + negbit := Scale(-((x.Sign() - 1) / 2)) + // scale > 0 + lens := Scale(len(s)) + if lens-negbit <= scale { + ss := make([]byte, 0, scale+2) + if negbit == 1 { + ss = append(ss, '-') + } + ss = append(ss, '0', '.') + ss = appendZeros(ss, scale-lens+negbit) + ss = append(ss, s[negbit:]...) + return string(ss) + } + // lens > scale + ss := make([]byte, 0, lens+1) + ss = append(ss, s[:lens-scale]...) + ss = append(ss, '.') + ss = append(ss, s[lens-scale:]...) + return string(ss) +} + +// Format is a support routine for fmt.Formatter. It accepts the decimal +// formats 'd' and 'f', and handles both equivalently. +// Width, precision, flags and bases 2, 8, 16 are not supported. +func (x *Dec) Format(s fmt.State, ch rune) { + if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' { + fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String()) + return + } + fmt.Fprintf(s, x.String()) +} + +func (z *Dec) scan(r io.RuneScanner) (*Dec, error) { + unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes + dp, dg := -1, -1 // indexes of decimal point, first digit +loop: + for { + ch, _, err := r.ReadRune() + if err == io.EOF { + break loop + } + if err != nil { + return nil, err + } + switch { + case ch == '+' || ch == '-': + if len(unscaled) > 0 || dp >= 0 { // must be first character + r.UnreadRune() + break loop + } + case ch == '.': + if dp >= 0 { + r.UnreadRune() + break loop + } + dp = len(unscaled) + continue // don't add to unscaled + case ch >= '0' && ch <= '9': + if dg == -1 { + dg = len(unscaled) + } + default: + r.UnreadRune() + break loop + } + unscaled = append(unscaled, byte(ch)) + } + if dg == -1 { + return nil, fmt.Errorf("no digits read") + } + if dp >= 0 { + z.SetScale(Scale(len(unscaled) - dp)) + } else { + z.SetScale(0) + } + _, ok := z.UnscaledBig().SetString(string(unscaled), 10) + if !ok { + return nil, fmt.Errorf("invalid decimal: %s", string(unscaled)) + } + return z, nil +} + +// SetString sets z to the value of s, interpreted as a decimal (base 10), +// and returns z and a boolean indicating success. The scale of z is the +// number of digits after the decimal point (including any trailing 0s), +// or 0 if there is no decimal point. If SetString fails, the value of z +// is undefined but the returned value is nil. +func (z *Dec) SetString(s string) (*Dec, bool) { + r := strings.NewReader(s) + _, err := z.scan(r) + if err != nil { + return nil, false + } + _, _, err = r.ReadRune() + if err != io.EOF { + return nil, false + } + // err == io.EOF => scan consumed all of s + return z, true +} + +// Scan is a support routine for fmt.Scanner; it sets z to the value of +// the scanned number. It accepts the decimal formats 'd' and 'f', and +// handles both equivalently. Bases 2, 8, 16 are not supported. +// The scale of z is the number of digits after the decimal point +// (including any trailing 0s), or 0 if there is no decimal point. 
+func (z *Dec) Scan(s fmt.ScanState, ch rune) error { + if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' { + return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch) + } + s.SkipSpace() + _, err := z.scan(s) + return err +} + +// Gob encoding version +const decGobVersion byte = 1 + +func scaleBytes(s Scale) []byte { + buf := make([]byte, scaleSize) + i := scaleSize + for j := 0; j < scaleSize; j++ { + i-- + buf[i] = byte(s) + s >>= 8 + } + return buf +} + +func scale(b []byte) (s Scale) { + for j := 0; j < scaleSize; j++ { + s <<= 8 + s |= Scale(b[j]) + } + return +} + +// GobEncode implements the gob.GobEncoder interface. +func (x *Dec) GobEncode() ([]byte, error) { + buf, err := x.UnscaledBig().GobEncode() + if err != nil { + return nil, err + } + buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion) + return buf, nil +} + +// GobDecode implements the gob.GobDecoder interface. +func (z *Dec) GobDecode(buf []byte) error { + if len(buf) == 0 { + return fmt.Errorf("Dec.GobDecode: no data") + } + b := buf[len(buf)-1] + if b != decGobVersion { + return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b) + } + l := len(buf) - scaleSize - 1 + err := z.UnscaledBig().GobDecode(buf[:l]) + if err != nil { + return err + } + z.SetScale(scale(buf[l : l+scaleSize])) + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (x *Dec) MarshalText() ([]byte, error) { + return []byte(x.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (z *Dec) UnmarshalText(data []byte) error { + _, ok := z.SetString(string(data)) + if !ok { + return fmt.Errorf("invalid inf.Dec") + } + return nil +} diff --git a/vendor/gopkg.in/inf.v0/rounder.go b/vendor/gopkg.in/inf.v0/rounder.go new file mode 100644 index 000000000000..3a97ef529b97 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/rounder.go @@ -0,0 +1,145 @@ +package inf + +import ( + "math/big" +) + +// Rounder represents a method for rounding the (possibly infinite decimal) +// result of a division to a finite Dec. It is used by Dec.Round() and +// Dec.Quo(). +// +// See the Example for results of using each Rounder with some sample values. +// +type Rounder rounder + +// See http://speleotrove.com/decimal/damodel.html#refround for more detailed +// definitions of these rounding modes. +var ( + RoundDown Rounder // towards 0 + RoundUp Rounder // away from 0 + RoundFloor Rounder // towards -infinity + RoundCeil Rounder // towards +infinity + RoundHalfDown Rounder // to nearest; towards 0 if same distance + RoundHalfUp Rounder // to nearest; away from 0 if same distance + RoundHalfEven Rounder // to nearest; even last digit if same distance +) + +// RoundExact is to be used in the case when rounding is not necessary. +// When used with Quo or Round, it returns the result verbatim when it can be +// expressed exactly with the given precision, and it returns nil otherwise. +// QuoExact is a shorthand for using Quo with RoundExact. +var RoundExact Rounder + +type rounder interface { + + // When UseRemainder() returns true, the Round() method is passed the + // remainder of the division, expressed as the numerator and denominator of + // a rational. + UseRemainder() bool + + // Round sets the rounded value of a quotient to z, and returns z. + // quo is rounded down (truncated towards zero) to the scale obtained from + // the Scaler in Quo(). + // + // When the remainder is not used, remNum and remDen are nil. 
+ // When used, the remainder is normalized between -1 and 1; that is: + // + // -|remDen| < remNum < |remDen| + // + // remDen has the same sign as y, and remNum is zero or has the same sign + // as x. + Round(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +type rndr struct { + useRem bool + round func(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +func (r rndr) UseRemainder() bool { + return r.useRem +} + +func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec { + return r.round(z, quo, remNum, remDen) +} + +var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)} + +func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec { + return func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + brA, brB := rA.BitLen(), rB.BitLen() + if brA < brB-1 { + // brA < brB-1 => |rA| < |rB/2| + return z + } + roundUp := false + srA, srB := rA.Sign(), rB.Sign() + s := srA * srB + if brA == brB-1 { + rA2 := new(big.Int).Lsh(rA, 1) + if s < 0 { + rA2.Neg(rA2) + } + roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0)) + } else { + // brA > brB-1 => |rA| > |rB/2| + roundUp = true + } + if roundUp { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1]) + } + return z + } +} + +func init() { + RoundExact = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + if rA.Sign() != 0 { + return nil + } + return z.Set(q) + }} + RoundDown = rndr{false, + func(z, q *Dec, rA, rB *big.Int) *Dec { + return z.Set(q) + }} + RoundUp = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign() != 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1]) + } + return z + }} + RoundFloor = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() < 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[0]) + } + return z + }} + RoundCeil = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() > 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[2]) + } + return z + }} + RoundHalfDown = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 + })} + RoundHalfUp = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c >= 0 + })} + RoundHalfEven = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 || c == 0 && odd == 1 + })} +} diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
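
For context, the vendored `inf` package added above supplies the arbitrary-precision decimal type that `resource.Quantity` (vendored below) is built on, and it is imported elsewhere in this patch as `gopkg.in/inf.v0`. A minimal sketch exercising the API described in its doc comments — the values here are arbitrary and purely illustrative:

```go
package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

func main() {
	// 10.00 represented as unscaled 1000 with scale 2.
	x := inf.NewDec(1000, 2)
	y := inf.NewDec(3, 0)

	// Quotient rounded to two decimal places, ties to even.
	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundHalfEven)
	fmt.Println(z) // 3.33

	// QuoExact returns nil because 10.00/3 is not a finite decimal.
	if new(inf.Dec).QuoExact(x, y) == nil {
		fmt.Println("no exact decimal quotient")
	}

	// SetString parses base-10 decimals; the scale is the number of
	// digits after the decimal point, including trailing zeros.
	if d, ok := new(inf.Dec).SetString("1.500"); ok {
		fmt.Println(d.Scale(), d) // 3 1.500
	}
}
```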
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100644 index 000000000000..c430067f3573 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,16 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- derekwaynecarr +- mikedanese +- saad-ali +- janetkuo +- tallclair +- eparis +- jbeda +- xiang90 +- mbohlool +- david-mcmahon +- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go new file mode 100644 index 000000000000..a8866a43e10b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -0,0 +1,299 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + "strconv" + + inf "gopkg.in/inf.v0" +) + +// Scale is used for getting and setting the base-10 scaled value. +// Base-2 scales are omitted for mathematical simplicity. +// See Quantity.ScaledValue for more details. +type Scale int32 + +// infScale adapts a Scale value to an inf.Scale value. +func (s Scale) infScale() inf.Scale { + return inf.Scale(-s) // inf.Scale is upside-down +} + +const ( + Nano Scale = -9 + Micro Scale = -6 + Milli Scale = -3 + Kilo Scale = 3 + Mega Scale = 6 + Giga Scale = 9 + Tera Scale = 12 + Peta Scale = 15 + Exa Scale = 18 +) + +var ( + Zero = int64Amount{} + + // Used by quantity strings - treat as read only + zeroBytes = []byte("0") +) + +// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster +// than operations on inf.Dec for values that can be represented as int64. +// +k8s:openapi-gen=true +type int64Amount struct { + value int64 + scale Scale +} + +// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0. +func (a int64Amount) Sign() int { + switch { + case a.value == 0: + return 0 + case a.value > 0: + return 1 + default: + return -1 + } +} + +// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be +// represented in an int64 OR would result in a loss of precision. This method is intended as +// an optimization to avoid calling AsDec. +func (a int64Amount) AsInt64() (int64, bool) { + if a.scale == 0 { + return a.value, true + } + if a.scale < 0 { + // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior + // to the int64Amount being created. + return 0, false + } + return positiveScaleInt64(a.value, a.scale) +} + +// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale, +// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result +// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger +// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would +// return 1, because 0.000001 is rounded up to 1. 
+func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) { + if a.scale < scale { + result, _ = negativeScaleInt64(a.value, scale-a.scale) + return result, true + } + return positiveScaleInt64(a.value, a.scale-scale) +} + +// AsDec returns an inf.Dec representation of this value. +func (a int64Amount) AsDec() *inf.Dec { + var base inf.Dec + base.SetUnscaled(a.value) + base.SetScale(inf.Scale(-a.scale)) + return &base +} + +// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b. +func (a int64Amount) Cmp(b int64Amount) int { + switch { + case a.scale == b.scale: + // compare only the unscaled portion + case a.scale > b.scale: + result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == a.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return -1 + default: + return 1 + } + } + b.value = result + default: + result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == b.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return 1 + default: + return -1 + } + } + a.value = result + } + + switch { + case a.value == b.value: + return 0 + case a.value < b.value: + return -1 + default: + return 1 + } +} + +// Add adds two int64Amounts together, matching scales. It will return false and not mutate +// a if overflow or underflow would result. +func (a *int64Amount) Add(b int64Amount) bool { + switch { + case b.value == 0: + return true + case a.value == 0: + a.value = b.value + a.scale = b.scale + return true + case a.scale == b.scale: + c, ok := int64Add(a.value, b.value) + if !ok { + return false + } + a.value = c + case a.scale > b.scale: + c, ok := positiveScaleInt64(a.value, a.scale-b.scale) + if !ok { + return false + } + c, ok = int64Add(c, b.value) + if !ok { + return false + } + a.scale = b.scale + a.value = c + default: + c, ok := positiveScaleInt64(b.value, b.scale-a.scale) + if !ok { + return false + } + c, ok = int64Add(a.value, c) + if !ok { + return false + } + a.value = c + } + return true +} + +// Sub removes the value of b from the current amount, or returns false if underflow would result. +func (a *int64Amount) Sub(b int64Amount) bool { + return a.Add(int64Amount{value: -b.value, scale: b.scale}) +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { + if a.scale >= scale { + return a, true + } + result, exact := negativeScaleInt64(a.value, scale-a.scale) + return int64Amount{value: result, scale: scale}, exact +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. 
+func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.value + exponent = int32(a.scale) + + amount, times := removeInt64Factors(mantissa, 10) + exponent += int32(times) + + // make sure exponent is a multiple of 3 + var ok bool + switch exponent % 3 { + case 1, -2: + amount, ok = int64MultiplyScale10(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 1 + case 2, -1: + amount, ok = int64MultiplyScale100(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 2 + } + return strconv.AppendInt(out, amount, 10), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + value, ok := a.AsScaledInt64(0) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out) + } + amount, exponent := removeInt64Factors(value, 1024) + return strconv.AppendInt(out, amount, 10), exponent +} + +// infDecAmount implements common operations over an inf.Dec that are specific to the quantity +// representation. +type infDecAmount struct { + *inf.Dec +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, scale.infScale(), inf.RoundUp) + return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0 +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.Dec.UnscaledBig() + exponent = int32(-a.Dec.Scale()) + amount := big.NewInt(0).Set(mantissa) + // move all factors of 10 into the exponent for easy reasoning + amount, times := removeBigIntFactors(amount, bigTen) + exponent += times + + // make sure exponent is a multiple of 3 + for exponent%3 != 0 { + amount.Mul(amount, bigTen) + exponent-- + } + + return append(out, amount.String()...), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, 0, inf.RoundUp) + amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024) + return append(out, amount.String()...), exponent +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go new file mode 100644 index 000000000000..9d7835bc23f9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto + +/* +Package resource is a generated protocol buffer package. + +It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto + +It has these top-level messages: + Quantity +*/ +package resource + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, + 0xc5, 0xc8, 0xce, 0x00, 0x23, 0x5b, 0x92, 0x1e, 0xae, 0x15, 0xd5, 0x8e, 0x2e, 0x36, 0x52, 0xb7, + 0x8e, 0x8c, 0x1d, 0x19, 0x9b, 0xbf, 0xe9, 0xd8, 0xb1, 0x03, 0x03, 0x31, 0x3f, 0x82, 0xea, 0x36, + 0x52, 0xb7, 0x7b, 0xef, 0xf4, 0x4e, 0x97, 0xbd, 0xd4, 0xd3, 0x56, 0x1a, 0xa7, 0xea, 0x50, 0x12, + 0x5b, 0xf2, 0xd4, 0xaa, 0x4f, 0xb2, 0x33, 0xc7, 0xea, 0xb4, 0x28, 0x1a, 0xb3, 0x28, 0xaa, 0xb9, + 0xb1, 0xc4, 0x4b, 0xd5, 0xd4, 0xfa, 0x20, 0x14, 0x53, 0xeb, 0x02, 0x57, 0xa4, 0x34, 0x59, 0xe2, + 0xc2, 0xd3, 0x4c, 0x36, 0xec, 0xbc, 0x1b, 0xdf, 0x1f, 0x2b, 0x79, 0x5e, 0xc9, 0xa6, 0xd6, 0x07, + 0x21, 0x87, 0xea, 0xf6, 0x51, 0x1b, 0x3f, 0x0f, 0xa5, 0xac, 0xdc, 0x42, 0x69, 0xa7, 0x9d, 0x4a, + 0x71, 0x19, 0x3e, 0x12, 0x25, 0x48, 0xd3, 0xf1, 0xe8, 0xdd, 0x34, 0x1b, 0xbd, 0x86, 0xc2, 0x7a, + 0xe3, 0x97, 0xe3, 0xeb, 0xec, 0xa2, 0xf5, 0x6c, 0xac, 0xbe, 0x11, 0x13, 0xf1, 0x70, 0xf9, 0x76, + 0xa2, 0xa7, 0xab, 0xef, 0x4d, 0x0e, 0x5f, 0x5d, 0x0e, 0xeb, 0x2e, 0x87, 0x4d, 0x97, 0xc3, 0xea, + 0x67, 0x02, 0xcf, 0x72, 0xdb, 0x23, 0xec, 0x7a, 0x84, 0x7d, 0x8f, 0xb0, 0x8a, 0x28, 0xb6, 0x11, + 0xc5, 0x2e, 0xa2, 0xd8, 0x47, 0x14, 0xbf, 0x11, 0xc5, 0xfa, 0x0f, 0xe1, 0x7d, 0x34, 0x3c, 0xf6, + 0x1f, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x08, 0x88, 0x49, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto 
b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto new file mode 100644 index 000000000000..acc904445228 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.api.resource; + +// Package-wide variables from generator "generated". +option go_package = "resource"; + +// Quantity is a fixed-point representation of a number. +// It provides convenient marshaling/unmarshaling in JSON and YAML, +// in addition to String() and Int64() accessors. +// +// The serialization format is: +// +// ::= +// (Note that may be empty, from the "" case in .) +// ::= 0 | 1 | ... | 9 +// ::= | +// ::= | . | . | . +// ::= "+" | "-" +// ::= | +// ::= | | +// ::= Ki | Mi | Gi | Ti | Pi | Ei +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// ::= m | "" | k | M | G | T | P | E +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// ::= "e" | "E" +// +// No matter which of the three exponent forms is used, no quantity may represent +// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal +// places. Numbers larger or more precise will be capped or rounded up. +// (E.g.: 0.1m will rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. +// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. 
+// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +message Quantity { + optional string string = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go new file mode 100644 index 000000000000..72d3880c0281 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -0,0 +1,314 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + + inf "gopkg.in/inf.v0" +) + +const ( + // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64. + // It is also the maximum decimal digits that can be represented with an int64. + maxInt64Factors = 18 +) + +var ( + // Commonly needed big.Int values-- treat as read only! + bigTen = big.NewInt(10) + bigZero = big.NewInt(0) + bigOne = big.NewInt(1) + bigThousand = big.NewInt(1000) + big1024 = big.NewInt(1024) + + // Commonly needed inf.Dec values-- treat as read only! + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) + decMinusOne = inf.NewDec(-1, 0) + decThousand = inf.NewDec(1000, 0) + dec1024 = inf.NewDec(1024, 0) + decMinus1024 = inf.NewDec(-1024, 0) + + // Largest (in magnitude) number allowed. + maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 + + // The maximum value we can represent milli-units for. + // Compare with the return value of Quantity.Value() to + // see if it's safe to use Quantity.MilliValue(). + MaxMilliValue = int64(((1 << 63) - 1) / 1000) +) + +const mostNegative = -(mostPositive + 1) +const mostPositive = 1<<63 - 1 + +// int64Add returns a+b, or false if that would overflow int64. +func int64Add(a, b int64) (int64, bool) { + c := a + b + switch { + case a > 0 && b > 0: + if c < 0 { + return 0, false + } + case a < 0 && b < 0: + if c > 0 { + return 0, false + } + if a == mostNegative && b == mostNegative { + return 0, false + } + } + return c, true +} + +// int64Multiply returns a*b, or false if that would overflow or underflow int64. +func int64Multiply(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == mostNegative || b == mostNegative { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64. +// Use when b is known to be greater than one. +func int64MultiplyScale(a int64, b int64) (int64, bool) { + if a == 0 || a == 1 { + return a * b, true + } + if a == mostNegative && b != 1 { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication. 
+func int64MultiplyScale10(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 10, true + } + if a == mostNegative { + return 0, false + } + c := a * 10 + return c, c/10 == a +} + +// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale100(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 100, true + } + if a == mostNegative { + return 0, false + } + c := a * 100 + return c, c/100 == a +} + +// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale1000(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 1000, true + } + if a == mostNegative { + return 0, false + } + c := a * 1000 + return c, c/1000 == a +} + +// positiveScaleInt64 multiplies base by 10^scale, returning false if the +// value overflows. Passing a negative scale is undefined. +func positiveScaleInt64(base int64, scale Scale) (int64, bool) { + switch scale { + case 0: + return base, true + case 1: + return int64MultiplyScale10(base) + case 2: + return int64MultiplyScale100(base) + case 3: + return int64MultiplyScale1000(base) + case 6: + return int64MultiplyScale(base, 1000000) + case 9: + return int64MultiplyScale(base, 1000000000) + default: + value := base + var ok bool + for i := Scale(0); i < scale; i++ { + if value, ok = int64MultiplyScale(value, 10); !ok { + return 0, false + } + } + return value, true + } +} + +// negativeScaleInt64 reduces base by the provided scale, rounding up, until the +// value is zero or the scale is reached. Passing a negative scale is undefined. +// The value returned, if not exact, is rounded away from zero. +func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { + if scale == 0 { + return base, true + } + + value := base + var fraction bool + for i := Scale(0); i < scale; i++ { + if !fraction && value%10 != 0 { + fraction = true + } + value = value / 10 + if value == 0 { + if fraction { + if base > 0 { + return 1, false + } + return -1, false + } + return 0, true + } + } + if fraction { + if base > 0 { + value += 1 + } else { + value += -1 + } + } + return value, !fraction +} + +func pow10Int64(b int64) int64 { + switch b { + case 0: + return 1 + case 1: + return 10 + case 2: + return 100 + case 3: + return 1000 + case 4: + return 10000 + case 5: + return 100000 + case 6: + return 1000000 + case 7: + return 10000000 + case 8: + return 100000000 + case 9: + return 1000000000 + case 10: + return 10000000000 + case 11: + return 100000000000 + case 12: + return 1000000000000 + case 13: + return 10000000000000 + case 14: + return 100000000000000 + case 15: + return 1000000000000000 + case 16: + return 10000000000000000 + case 17: + return 100000000000000000 + case 18: + return 1000000000000000000 + default: + return 0 + } +} + +// negativeScaleInt64 returns the result of dividing base by scale * 10 and the remainder, or +// false if no such division is possible. Dividing by negative scales is undefined. 
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) { + if scale == 0 { + return base, 0, true + } + // the max scale representable in base 10 in an int64 is 18 decimal places + if scale >= 18 { + return 0, base, false + } + divisor := pow10Int64(int64(scale)) + return base / divisor, base % divisor, true +} + +// removeInt64Factors divides in a loop; the return values have the property that +// value == result * base ^ scale +func removeInt64Factors(value int64, base int64) (result int64, times int32) { + times = 0 + result = value + negative := result < 0 + if negative { + result = -result + } + switch base { + // allow the compiler to optimize the common cases + case 10: + for result >= 10 && result%10 == 0 { + times++ + result = result / 10 + } + // allow the compiler to optimize the common cases + case 1024: + for result >= 1024 && result%1024 == 0 { + times++ + result = result / 1024 + } + default: + for result >= base && result%base == 0 { + times++ + result = result / base + } + } + if negative { + result = -result + } + return result, times +} + +// removeBigIntFactors divides in a loop; the return values have the property that +// d == result * factor ^ times +// d may be modified in place. +// If d == 0, then the return values will be (0, 0) +func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) { + q := big.NewInt(0) + m := big.NewInt(0) + for d.Cmp(bigZero) != 0 { + q.DivMod(d, factor, m) + if m.Cmp(bigZero) != 0 { + break + } + times++ + d, q = q, d + } + return d, times +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go new file mode 100644 index 000000000000..b155a62a45af --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -0,0 +1,738 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "strconv" + "strings" + + inf "gopkg.in/inf.v0" +) + +// Quantity is a fixed-point representation of a number. +// It provides convenient marshaling/unmarshaling in JSON and YAML, +// in addition to String() and Int64() accessors. +// +// The serialization format is: +// +// ::= +// (Note that may be empty, from the "" case in .) +// ::= 0 | 1 | ... | 9 +// ::= | +// ::= | . | . | . +// ::= "+" | "-" +// ::= | +// ::= | | +// ::= Ki | Mi | Gi | Ti | Pi | Ei +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// ::= m | "" | k | M | G | T | P | E +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// ::= "e" | "E" +// +// No matter which of the three exponent forms is used, no quantity may represent +// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal +// places. Numbers larger or more precise will be capped or rounded up. +// (E.g.: 0.1m will rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. 
+// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Quantity struct { + // i is the quantity in int64 scaled form, if d.Dec == nil + i int64Amount + // d is the quantity in inf.Dec form if d.Dec != nil + d infDecAmount + // s is the generated value of this quantity to avoid recalculation + s string + + // Change Format at will. See the comment for Canonicalize for + // more details. + Format +} + +// CanonicalValue allows a quantity amount to be converted to a string. +type CanonicalValue interface { + // AsCanonicalBytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-10. Callers may + // pass a byte slice to the method to avoid allocations. + AsCanonicalBytes(out []byte) ([]byte, int32) + // AsCanonicalBase1024Bytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-1024. Callers + // may pass a byte slice to the method to avoid allocations. + AsCanonicalBase1024Bytes(out []byte) ([]byte, int32) +} + +// Format lists the three possible formattings of a quantity. +type Format string + +const ( + DecimalExponent = Format("DecimalExponent") // e.g., 12e6 + BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20) + DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6) +) + +// MustParse turns the given string into a quantity or panics; for tests +// or others cases where you know the string is valid. +func MustParse(str string) Quantity { + q, err := ParseQuantity(str) + if err != nil { + panic(fmt.Errorf("cannot parse '%v': %v", str, err)) + } + return q +} + +const ( + // splitREString is used to separate a number from its suffix; as such, + // this is overly permissive, but that's OK-- it will be checked later. + splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" +) + +var ( + // Errors that could happen while parsing a string. 
+ ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'") + ErrNumeric = errors.New("unable to parse numeric part of quantity") + ErrSuffix = errors.New("unable to parse quantity's suffix") +) + +// parseQuantityString is a fast scanner for quantity values. +func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) { + positive = true + pos := 0 + end := len(str) + + // handle leading sign + if pos < end { + switch str[0] { + case '-': + positive = false + pos++ + case '+': + pos++ + } + } + + // strip leading zeros +Zeroes: + for i := pos; ; i++ { + if i >= end { + num = "0" + value = num + return + } + switch str[i] { + case '0': + pos++ + default: + break Zeroes + } + } + + // extract the numerator +Num: + for i := pos; ; i++ { + if i >= end { + num = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + num = str[pos:i] + pos = i + break Num + } + } + + // if we stripped all numerator positions, always return 0 + if len(num) == 0 { + num = "0" + } + + // handle a denominator + if pos < end && str[pos] == '.' { + pos++ + Denom: + for i := pos; ; i++ { + if i >= end { + denom = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + denom = str[pos:i] + pos = i + break Denom + } + } + // TODO: we currently allow 1.G, but we may not want to in the future. + // if len(denom) == 0 { + // err = ErrFormatWrong + // return + // } + } + value = str[0:pos] + + // grab the elements of the suffix + suffixStart := pos + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") { + pos = i + break + } + } + if pos < end { + switch str[pos] { + case '-', '+': + pos++ + } + } +Suffix: + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + break Suffix + } + } + // we encountered a non decimal in the Suffix loop, but the last character + // was not a valid exponent + err = ErrFormatWrong + return +} + +// ParseQuantity turns str into a Quantity, or returns an error. 
+func ParseQuantity(str string) (Quantity, error) { + if len(str) == 0 { + return Quantity{}, ErrFormatWrong + } + if str == "0" { + return Quantity{Format: DecimalSI, s: str}, nil + } + + positive, value, num, denom, suf, err := parseQuantityString(str) + if err != nil { + return Quantity{}, err + } + + base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf)) + if !ok { + return Quantity{}, ErrSuffix + } + + precision := int32(0) + scale := int32(0) + mantissa := int64(1) + switch format { + case DecimalExponent, DecimalSI: + scale = exponent + precision = maxInt64Factors - int32(len(num)+len(denom)) + case BinarySI: + scale = 0 + switch { + case exponent >= 0 && len(denom) == 0: + // only handle positive binary numbers with the fast path + mantissa = int64(int64(mantissa) << uint64(exponent)) + // 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision + precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1 + default: + precision = -1 + } + } + + if precision >= 0 { + // if we have a denominator, shift the entire value to the left by the number of places in the + // denominator + scale -= int32(len(denom)) + if scale >= int32(Nano) { + shifted := num + denom + + var value int64 + value, err := strconv.ParseInt(shifted, 10, 64) + if err != nil { + return Quantity{}, ErrNumeric + } + if result, ok := int64Multiply(value, int64(mantissa)); ok { + if !positive { + result = -result + } + // if the number is in canonical form, reuse the string + switch format { + case BinarySI: + if exponent%10 == 0 && (value&0x07 != 0) { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + default: + if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + } + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil + } + } + } + + amount := new(inf.Dec) + if _, ok := amount.SetString(value); !ok { + return Quantity{}, ErrNumeric + } + + // So that no one but us has to think about suffixes, remove it. + if base == 10 { + amount.SetScale(amount.Scale() + Scale(exponent).infScale()) + } else if base == 2 { + // numericSuffix = 2 ** exponent + numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent)) + ub := amount.UnscaledBig() + amount.SetUnscaledBig(ub.Mul(ub, numericSuffix)) + } + + // Cap at min/max bounds. + sign := amount.Sign() + if sign == -1 { + amount.Neg(amount) + } + + // This rounds non-zero values up to the minimum representable value, under the theory that + // if you want some resources, you should get some resources, even if you asked for way too small + // of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have + // the side effect of rounding values < .5n to zero. + if v, ok := amount.Unscaled(); v != int64(0) || !ok { + amount.Round(amount, Nano.infScale(), inf.RoundUp) + } + + // The max is just a simple cap. + // TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster + if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 { + amount.Set(maxAllowed.Dec) + } + + if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 { + // This avoids rounding and hopefully confusion, too. 
+ format = DecimalSI + } + if sign == -1 { + amount.Neg(amount) + } + + return Quantity{d: infDecAmount{amount}, Format: format}, nil +} + +// DeepCopy returns a deep-copy of the Quantity value. Note that the method +// receiver is a value, so we can mutate it in-place and return it. +func (q Quantity) DeepCopy() Quantity { + if q.d.Dec != nil { + tmp := &inf.Dec{} + q.d.Dec = tmp.Set(q.d.Dec) + } + return q +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Quantity) OpenAPISchemaFormat() string { return "" } + +// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). +// +// Note about BinarySI: +// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between +// -1 and +1, it will be emitted as if q.Format were DecimalSI. +// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be +// rounded up. (1.1i becomes 2i.) +func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { + if q.IsZero() { + return zeroBytes, nil + } + + var rounded CanonicalValue + format := q.Format + switch format { + case DecimalExponent, DecimalSI: + case BinarySI: + if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 { + // This avoids rounding and hopefully confusion, too. + format = DecimalSI + } else { + var exact bool + if rounded, exact = q.AsScale(0); !exact { + // Don't lose precision-- show as DecimalSI + format = DecimalSI + } + } + default: + format = DecimalExponent + } + + // TODO: If BinarySI formatting is requested but would cause rounding, upgrade to + // one of the other formats. + switch format { + case DecimalExponent, DecimalSI: + number, exponent := q.AsCanonicalBytes(out) + suffix, _ := quantitySuffixer.constructBytes(10, exponent, format) + return number, suffix + default: + // format must be BinarySI + number, exponent := rounded.AsCanonicalBase1024Bytes(out) + suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format) + return number, suffix + } +} + +// AsInt64 returns a representation of the current value as an int64 if a fast conversion +// is possible. If false is returned, callers must use the inf.Dec form of this quantity. +func (q *Quantity) AsInt64() (int64, bool) { + if q.d.Dec != nil { + return 0, false + } + return q.i.AsInt64() +} + +// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself. +func (q *Quantity) ToDec() *Quantity { + if q.d.Dec == nil { + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + } + return q +} + +// AsDec returns the quantity as represented by a scaled inf.Dec. +func (q *Quantity) AsDec() *inf.Dec { + if q.d.Dec != nil { + return q.d.Dec + } + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + return q.d.Dec +} + +// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa +// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra +// allocation. +func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + if q.d.Dec != nil { + return q.d.AsCanonicalBytes(out) + } + return q.i.AsCanonicalBytes(out) +} + +// IsZero returns true if the quantity is equal to zero. 
+func (q *Quantity) IsZero() bool { + if q.d.Dec != nil { + return q.d.Dec.Sign() == 0 + } + return q.i.value == 0 +} + +// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the +// quantity is greater than zero. +func (q *Quantity) Sign() int { + if q.d.Dec != nil { + return q.d.Dec.Sign() + } + return q.i.Sign() +} + +// AsScale returns the current value, rounded up to the provided scale, and returns +// false if the scale resulted in a loss of precision. +func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) { + if q.d.Dec != nil { + return q.d.AsScale(scale) + } + return q.i.AsScale(scale) +} + +// RoundUp updates the quantity to the provided scale, ensuring that the value is at +// least 1. False is returned if the rounding operation resulted in a loss of precision. +// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10). +func (q *Quantity) RoundUp(scale Scale) bool { + if q.d.Dec != nil { + q.s = "" + d, exact := q.d.AsScale(scale) + q.d = d + return exact + } + // avoid clearing the string value if we have already calculated it + if q.i.scale >= scale { + return true + } + q.s = "" + i, exact := q.i.AsScale(scale) + q.i = i + return exact +} + +// Add adds the provide y quantity to the current value. If the current value is zero, +// the format of the quantity will be updated to the format of y. +func (q *Quantity) Add(y Quantity) { + q.s = "" + if q.d.Dec == nil && y.d.Dec == nil { + if q.i.value == 0 { + q.Format = y.Format + } + if q.i.Add(y.i) { + return + } + } else if q.IsZero() { + q.Format = y.Format + } + q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec()) +} + +// Sub subtracts the provided quantity from the current value in place. If the current +// value is zero, the format of the quantity will be updated to the format of y. +func (q *Quantity) Sub(y Quantity) { + q.s = "" + if q.IsZero() { + q.Format = y.Format + } + if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) { + return + } + q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) +} + +// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) Cmp(y Quantity) int { + if q.d.Dec == nil && y.d.Dec == nil { + return q.i.Cmp(y.i) + } + return q.AsDec().Cmp(y.AsDec()) +} + +// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) CmpInt64(y int64) int { + if q.d.Dec != nil { + return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0))) + } + return q.i.Cmp(int64Amount{value: y}) +} + +// Neg sets quantity to be the negative value of itself. +func (q *Quantity) Neg() { + q.s = "" + if q.d.Dec == nil { + q.i.value = -q.i.value + return + } + q.d.Dec.Neg(q.d.Dec) +} + +// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation +// of most Quantity values. +const int64QuantityExpectedBytes = 18 + +// String formats the Quantity as a string, caching the result if not calculated. +// String is an expensive operation and caching this result significantly reduces the cost of +// normal parse / marshal operations on Quantity. +func (q *Quantity) String() string { + if len(q.s) == 0 { + result := make([]byte, 0, int64QuantityExpectedBytes) + number, suffix := q.CanonicalizeBytes(result) + number = append(number, suffix...) + q.s = string(number) + } + return q.s +} + +// MarshalJSON implements the json.Marshaller interface. 
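+//
+// A Quantity always marshals as a JSON string, never as a bare number; for
+// example, a value parsed from "1500m" marshals to "1500m", quotes included.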
+func (q Quantity) MarshalJSON() ([]byte, error) { + if len(q.s) > 0 { + out := make([]byte, len(q.s)+2) + out[0], out[len(out)-1] = '"', '"' + copy(out[1:], q.s) + return out, nil + } + result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes) + result[0] = '"' + number, suffix := q.CanonicalizeBytes(result[1:1]) + // if the same slice was returned to us that we passed in, avoid another allocation by copying number into + // the source slice and returning that + if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes { + number = append(number, suffix...) + number = append(number, '"') + return result[:1+len(number)], nil + } + // if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use + // append + result = result[:1] + result = append(result, number...) + result = append(result, suffix...) + result = append(result, '"') + return result, nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +// TODO: Remove support for leading/trailing whitespace +func (q *Quantity) UnmarshalJSON(value []byte) error { + l := len(value) + if l == 4 && bytes.Equal(value, []byte("null")) { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + if l >= 2 && value[0] == '"' && value[l-1] == '"' { + value = value[1 : l-1] + } + + parsed, err := ParseQuantity(strings.TrimSpace(string(value))) + if err != nil { + return err + } + + // This copy is safe because parsed will not be referred to again. + *q = parsed + return nil +} + +// NewQuantity returns a new Quantity representing the given +// value in the given format. +func NewQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value}, + Format: format, + } +} + +// NewMilliQuantity returns a new Quantity representing the given +// value * 1/1000 in the given format. Note that BinarySI formatting +// will round fractional values, and will be changed to DecimalSI for +// values x where (-1 < x < 1) && (x != 0). +func NewMilliQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: -3}, + Format: format, + } +} + +// NewScaledQuantity returns a new Quantity representing the given +// value * 10^scale in DecimalSI format. +func NewScaledQuantity(value int64, scale Scale) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: scale}, + Format: DecimalSI, + } +} + +// Value returns the value of q; any fractional part will be lost. +func (q *Quantity) Value() int64 { + return q.ScaledValue(0) +} + +// MilliValue returns the value of ceil(q * 1000); this could overflow an int64; +// if that's a concern, call Value() first to verify the number is small enough. +func (q *Quantity) MilliValue() int64 { + return q.ScaledValue(Milli) +} + +// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// To detect overflow, call Value() first and verify the expected magnitude. +func (q *Quantity) ScaledValue(scale Scale) int64 { + if q.d.Dec == nil { + i, _ := q.i.AsScaledInt64(scale) + return i + } + dec := q.d.Dec + return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale())) +} + +// Set sets q's value to be value. +func (q *Quantity) Set(value int64) { + q.SetScaled(value, 0) +} + +// SetMilli sets q's value to be value * 1/1000. 
+func (q *Quantity) SetMilli(value int64) { + q.SetScaled(value, Milli) +} + +// SetScaled sets q's value to be value * 10^scale +func (q *Quantity) SetScaled(value int64, scale Scale) { + q.s = "" + q.d.Dec = nil + q.i = int64Amount{value: value, scale: scale} +} + +// Copy is a convenience function that makes a deep copy for you. Non-deep +// copies of quantities share pointers and you will regret that. +func (q *Quantity) Copy() *Quantity { + if q.d.Dec == nil { + return &Quantity{ + s: q.s, + i: q.i, + Format: q.Format, + } + } + tmp := &inf.Dec{} + return &Quantity{ + s: q.s, + d: infDecAmount{tmp.Set(q.d.Dec)}, + Format: q.Format, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go new file mode 100644 index 000000000000..74dfb4e4b7c9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -0,0 +1,284 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "io" + + "github.com/gogo/protobuf/proto" +) + +var _ proto.Sizer = &Quantity{} + +func (m *Quantity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +// MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. +func (m *Quantity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + + data[i] = 0xa + i++ + // BEGIN CUSTOM MARSHAL + out := m.String() + i = encodeVarintGenerated(data, i, uint64(len(out))) + i += copy(data[i:], out) + // END CUSTOM MARSHAL + + return i, nil +} + +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} + +func (m *Quantity) Size() (n int) { + var l int + _ = l + + // BEGIN CUSTOM SIZE + l = len(m.String()) + // END CUSTOM SIZE + + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. 
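+//
+// For example, the bytes {0x0a, 0x02, '5', 'k'} decode to the quantity 5k:
+// 0x0a is the tag for field 1 with wire type 2 (length-delimited), 0x02 is
+// the string length, and the remaining "5k" is handed to ParseQuantity.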
+func (m *Quantity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + + // BEGIN CUSTOM DECODE + p, err := ParseQuantity(s) + if err != nil { + return err + } + *m = p + // END CUSTOM DECODE + + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go new file mode 100644 index 000000000000..55e177b0e9b0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math" + "math/big" + "sync" +) + +var ( + // A sync pool to reduce allocation. + intPool sync.Pool + maxInt64 = big.NewInt(math.MaxInt64) +) + +func init() { + intPool.New = func() interface{} { + return &big.Int{} + } +} + +// scaledValue scales given unscaled value from scale to new Scale and returns +// an int64. It ALWAYS rounds up the result when scale down. The final result might +// overflow. +// +// scale, newScale represents the scale of the unscaled decimal. +// The mathematical value of the decimal is unscaled * 10**(-scale). +func scaledValue(unscaled *big.Int, scale, newScale int) int64 { + dif := scale - newScale + if dif == 0 { + return unscaled.Int64() + } + + // Handle scale up + // This is an easy case, we do not need to care about rounding and overflow. + // If any intermediate operation causes overflow, the result will overflow. + if dif < 0 { + return unscaled.Int64() * int64(math.Pow10(-dif)) + } + + // Handle scale down + // We have to be careful about the intermediate operations. + + // fast path when unscaled < max.Int64 and exp(10,dif) < max.Int64 + const log10MaxInt64 = 19 + if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 { + divide := int64(math.Pow10(dif)) + result := unscaled.Int64() / divide + mod := unscaled.Int64() % divide + if mod != 0 { + return result + 1 + } + return result + } + + // We should only convert back to int64 when getting the result. + divisor := intPool.Get().(*big.Int) + exp := intPool.Get().(*big.Int) + result := intPool.Get().(*big.Int) + defer func() { + intPool.Put(divisor) + intPool.Put(exp) + intPool.Put(result) + }() + + // divisor = 10^(dif) + // TODO: create loop up table if exp costs too much. + divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil) + // reuse exp + remainder := exp + + // result = unscaled / divisor + // remainder = unscaled % divisor + result.DivMod(unscaled, divisor, remainder) + if remainder.Sign() != 0 { + return result.Int64() + 1 + } + + return result.Int64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go new file mode 100644 index 000000000000..5ed7abe66510 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "strconv" +) + +type suffix string + +// suffixer can interpret and construct suffixes. +type suffixer interface { + interpret(suffix) (base, exponent int32, fmt Format, ok bool) + construct(base, exponent int32, fmt Format) (s suffix, ok bool) + constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool) +} + +// quantitySuffixer handles suffixes for all three formats that quantity +// can handle. +var quantitySuffixer = newSuffixer() + +type bePair struct { + base, exponent int32 +} + +type listSuffixer struct { + suffixToBE map[suffix]bePair + beToSuffix map[bePair]suffix + beToSuffixBytes map[bePair][]byte +} + +func (ls *listSuffixer) addSuffix(s suffix, pair bePair) { + if ls.suffixToBE == nil { + ls.suffixToBE = map[suffix]bePair{} + } + if ls.beToSuffix == nil { + ls.beToSuffix = map[bePair]suffix{} + } + if ls.beToSuffixBytes == nil { + ls.beToSuffixBytes = map[bePair][]byte{} + } + ls.suffixToBE[s] = pair + ls.beToSuffix[pair] = s + ls.beToSuffixBytes[pair] = []byte(s) +} + +func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) { + pair, ok := ls.suffixToBE[s] + if !ok { + return 0, 0, false + } + return pair.base, pair.exponent, true +} + +func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) { + s, ok = ls.beToSuffix[bePair{base, exponent}] + return +} + +func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) { + s, ok = ls.beToSuffixBytes[bePair{base, exponent}] + return +} + +type suffixHandler struct { + decSuffixes listSuffixer + binSuffixes listSuffixer +} + +type fastLookup struct { + *suffixHandler +} + +func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) { + switch s { + case "": + return 10, 0, DecimalSI, true + case "n": + return 10, -9, DecimalSI, true + case "u": + return 10, -6, DecimalSI, true + case "m": + return 10, -3, DecimalSI, true + case "k": + return 10, 3, DecimalSI, true + case "M": + return 10, 6, DecimalSI, true + case "G": + return 10, 9, DecimalSI, true + } + return l.suffixHandler.interpret(s) +} + +func newSuffixer() suffixer { + sh := &suffixHandler{} + + // IMPORTANT: if you change this section you must change fastLookup + + sh.binSuffixes.addSuffix("Ki", bePair{2, 10}) + sh.binSuffixes.addSuffix("Mi", bePair{2, 20}) + sh.binSuffixes.addSuffix("Gi", bePair{2, 30}) + sh.binSuffixes.addSuffix("Ti", bePair{2, 40}) + sh.binSuffixes.addSuffix("Pi", bePair{2, 50}) + sh.binSuffixes.addSuffix("Ei", bePair{2, 60}) + // Don't emit an error when trying to produce + // a suffix for 2^0. 
+ sh.decSuffixes.addSuffix("", bePair{2, 0}) + + sh.decSuffixes.addSuffix("n", bePair{10, -9}) + sh.decSuffixes.addSuffix("u", bePair{10, -6}) + sh.decSuffixes.addSuffix("m", bePair{10, -3}) + sh.decSuffixes.addSuffix("", bePair{10, 0}) + sh.decSuffixes.addSuffix("k", bePair{10, 3}) + sh.decSuffixes.addSuffix("M", bePair{10, 6}) + sh.decSuffixes.addSuffix("G", bePair{10, 9}) + sh.decSuffixes.addSuffix("T", bePair{10, 12}) + sh.decSuffixes.addSuffix("P", bePair{10, 15}) + sh.decSuffixes.addSuffix("E", bePair{10, 18}) + + return fastLookup{sh} +} + +func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) { + switch fmt { + case DecimalSI: + return sh.decSuffixes.construct(base, exponent) + case BinarySI: + return sh.binSuffixes.construct(base, exponent) + case DecimalExponent: + if base != 10 { + return "", false + } + if exponent == 0 { + return "", true + } + return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true + } + return "", false +} + +func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) { + switch format { + case DecimalSI: + return sh.decSuffixes.constructBytes(base, exponent) + case BinarySI: + return sh.binSuffixes.constructBytes(base, exponent) + case DecimalExponent: + if base != 10 { + return nil, false + } + if exponent == 0 { + return nil, true + } + result := make([]byte, 8, 8) + result[0] = 'e' + number := strconv.AppendInt(result[1:1], int64(exponent), 10) + if &result[1] == &number[0] { + return result[:1+len(number)], true + } + result = append(result[:1], number...) + return result, true + } + return nil, false +} + +func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) { + // Try lookup tables first + if b, e, ok := sh.decSuffixes.lookup(suffix); ok { + return b, e, DecimalSI, true + } + if b, e, ok := sh.binSuffixes.lookup(suffix); ok { + return b, e, BinarySI, true + } + + if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') { + parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64) + if err != nil { + return 0, 0, DecimalExponent, false + } + return 10, int32(parsed), DecimalExponent, true + } + + return 0, 0, DecimalExponent, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go new file mode 100644 index 000000000000..ab47407900cf --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -0,0 +1,27 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package resource + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Quantity) DeepCopyInto(out *Quantity) { + *out = in.DeepCopy() + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100644 index 000000000000..cdb125a0dd47 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,31 @@ +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- caesarxuchao +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- sttts +- quinton-hoole +- luxas +- janetkuo +- justinsb +- ncdc +- soltysh +- dims +- madhusudancs +- hongchaodeng +- krousey +- mml +- mbohlool +- david-mcmahon +- therc +- mqliang +- kevin-wangzefeng +- jianhuiz +- feihujiang diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go new file mode 100644 index 000000000000..042cd5b9c558 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsControlledBy checks if the object has a controllerRef set to the given owner +func IsControlledBy(obj Object, owner Object) bool { + ref := GetControllerOf(obj) + if ref == nil { + return false + } + return ref.UID == owner.GetUID() +} + +// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller +func GetControllerOf(controllee Object) *OwnerReference { + for _, ref := range controllee.GetOwnerReferences() { + if ref.Controller != nil && *ref.Controller { + return &ref + } + } + return nil +} + +// NewControllerRef creates an OwnerReference pointing to the given owner. +func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { + blockOwnerDeletion := true + isController := true + return &OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go new file mode 100644 index 000000000000..5c36f82c1277 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -0,0 +1,319 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func AddConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_TypeMeta_To_v1_TypeMeta, + + Convert_v1_ListMeta_To_v1_ListMeta, + + Convert_intstr_IntOrString_To_intstr_IntOrString, + + Convert_Pointer_v1_Duration_To_v1_Duration, + Convert_v1_Duration_To_Pointer_v1_Duration, + + Convert_Slice_string_To_v1_Time, + + Convert_v1_Time_To_v1_Time, + Convert_v1_MicroTime_To_v1_MicroTime, + + Convert_resource_Quantity_To_resource_Quantity, + + Convert_string_To_labels_Selector, + Convert_labels_Selector_To_string, + + Convert_string_To_fields_Selector, + Convert_fields_Selector_To_string, + + Convert_Pointer_bool_To_bool, + Convert_bool_To_Pointer_bool, + + Convert_Pointer_string_To_string, + Convert_string_To_Pointer_string, + + Convert_Pointer_int64_To_int, + Convert_int_To_Pointer_int64, + + Convert_Pointer_int32_To_int32, + Convert_int32_To_Pointer_int32, + + Convert_Pointer_int64_To_int64, + Convert_int64_To_Pointer_int64, + + Convert_Pointer_float64_To_float64, + Convert_float64_To_Pointer_float64, + + Convert_Map_string_To_string_To_v1_LabelSelector, + Convert_v1_LabelSelector_To_Map_string_To_string, + + Convert_Slice_string_To_Slice_int32, + + Convert_Slice_string_To_v1_DeletionPropagation, + ) +} + +func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = float64(**in) + return nil +} + +func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error { + temp := float64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int32(**in) + return nil +} + +func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error { + temp := int32(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int64(in **int64, out *int64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int64(**in) + return nil +} + +func Convert_int64_To_Pointer_int64(in *int64, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int(**in) + return nil +} + +func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { + if *in == nil { + *out = "" + return nil + } + *out = **in + return nil +} + +func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { + if in == nil { + stringVar := "" + *out = &stringVar + return nil + } + *out = in + return nil +} + +func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { + if *in == nil { + *out = false + return nil + } + *out = **in + return nil +} + +func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { + if in == nil { + boolVar := false + *out = &boolVar + return nil + } + *out = in + return nil +} 
+ +// +k8s:conversion-fn=drop +func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error { + // These values are explicitly not copied + //out.APIVersion = in.APIVersion + //out.Kind = in.Kind + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_MicroTime_To_v1_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error { + if *in == nil { + *out = Duration{} // zero duration + return nil + } + *out = **in // copy + return nil +} + +func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error { + temp := *in //copy + *out = &temp + return nil +} + +// Convert_Slice_string_To_v1_Time allows converting a URL query parameter value +func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error { + str := "" + if len(*input) > 0 { + str = (*input)[0] + } + return out.UnmarshalQueryParameter(str) +} + +func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { + selector, err := labels.Parse(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { + selector, err := fields.ParseSelector(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { + *out = *in + return nil +} + +func Convert_Map_string_To_string_To_v1_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error { + if in == nil { + return nil + } + for labelKey, labelValue := range *in { + AddLabelToSelector(out, labelKey, labelValue) + } + return nil +} + +func Convert_v1_LabelSelector_To_Map_string_To_string(in *LabelSelector, out *map[string]string, s conversion.Scope) error { + var err error + *out, err = LabelSelectorAsMap(in) + return err +} + +// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or +// a single query parameter with a comma delimited value to multiple int32. +// This is used for port forwarding which needs the ports as int32. 
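+//
+// For example, []string{"8080,8443"} converts to []int32{8080, 8443}.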
+func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error { + for _, s := range *in { + for _, v := range strings.Split(s, ",") { + x, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return fmt.Errorf("cannot convert to []int32: %v", err) + } + *out = append(*out, int32(x)) + } + } + return nil +} + +// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { + if len(*input) > 0 { + *out = DeletionPropagation((*input)[0]) + } else { + *out = "" + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go new file mode 100644 index 000000000000..dbaa87c879fc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io + +package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go new file mode 100644 index 000000000000..babe8a8b53b7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -0,0 +1,60 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +type Duration struct { + time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (d *Duration) UnmarshalJSON(b []byte) error { + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pd, err := time.ParseDuration(str) + if err != nil { + return err + } + d.Duration = pd + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go new file mode 100644 index 000000000000..4fa6f315738a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -0,0 +1,8256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + + It has these top-level messages: + APIGroup + APIGroupList + APIResource + APIResourceList + APIVersions + CreateOptions + DeleteOptions + Duration + ExportOptions + GetOptions + GroupKind + GroupResource + GroupVersion + GroupVersionForDiscovery + GroupVersionKind + GroupVersionResource + Initializer + Initializers + LabelSelector + LabelSelectorRequirement + List + ListMeta + ListOptions + MicroTime + ObjectMeta + OwnerReference + Patch + Preconditions + RootPaths + ServerAddressByClientCIDR + Status + StatusCause + StatusDetails + Time + Timestamp + TypeMeta + UpdateOptions + Verbs + WatchEvent +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + +import time "time" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } +func (*GroupVersionForDiscovery) ProtoMessage() {} +func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *Initializers) Reset() { *m = Initializers{} } +func (*Initializers) ProtoMessage() {} +func 
(*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (*LabelSelectorRequirement) ProtoMessage() {} +func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{19} +} + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (*ServerAddressByClientCIDR) ProtoMessage() {} +func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{29} +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *UpdateOptions) 
Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func init() { + proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") + proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") + proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") + proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") + proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") + proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") + proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") + proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") + proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") + proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") + 
proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") + proto.RegisterType((*UpdateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.UpdateOptions") + proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") + proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") +} +func (m *APIGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIGroupList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for _, msg := range m.Groups { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + if m.Verbs != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) + n2, err := m.Verbs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) 
+ i += copy(dAtA[i:], m.SingularName) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i += copy(dAtA[i:], m.StorageVersionHash) + return i, nil +} + +func (m *APIResourceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) + if len(m.APIResources) > 0 { + for _, msg := range m.APIResources { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIVersions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) + n3, err := m.Preconditions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.OrphanDependents != nil { + dAtA[i] = 0x18 + i++ + if *m.OrphanDependents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ 
+ } + if m.PropagationPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i += copy(dAtA[i:], *m.PropagationPolicy) + } + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) + return i, nil +} + +func (m *ExportOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Export { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Exact { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *GetOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + return i, nil +} + +func (m *GroupKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} + +func (m *GroupResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + return i, nil +} + +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil +} + +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} + +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + return i, nil +} + +func (m *Initializer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *Initializers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Pending) > 0 { + for _, msg := range m.Pending { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Result != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n4, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *LabelSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) + for k := range m.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, 
string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + for _, k := range keysForMatchLabels { + dAtA[i] = 0xa + i++ + v := m.MatchLabels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *List) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) + return i, nil +} + +func (m *ListOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i += copy(dAtA[i:], m.LabelSelector) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i 
+= copy(dAtA[i:], m.FieldSelector) + dAtA[i] = 0x18 + i++ + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) + return i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i += copy(dAtA[i:], m.GenerateName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) + n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if m.DeletionTimestamp != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) + n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.DeletionGracePeriodSeconds != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x5a + i++ + v := m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0x62 + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + 
dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i += copy(dAtA[i:], m.ClusterName) + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n8, err := m.Initializers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + if m.Controller != nil { + dAtA[i] = 0x30 + i++ + if *m.Controller { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.BlockOwnerDeletion != nil { + dAtA[i] = 0x38 + i++ + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) + } + return i, nil +} + +func (m *RootPaths) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i += copy(dAtA[i:], m.ClientCIDR) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) + i += copy(dAtA[i:], m.ServerAddress) + return i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + if m.Details != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) + n10, err := m.Details.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + return i, nil +} + +func (m *StatusCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) + i += copy(dAtA[i:], m.Field) + return i, nil +} + +func (m *StatusDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil +} + +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) + return i, nil +} + +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil +} + +func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m Verbs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m Verbs) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *WatchEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n11, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *APIGroup) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.PreferredVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupList) Size() (n int) { + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.Verbs != 
nil { + l = m.Verbs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SingularName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageVersionHash) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIResourceList) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIResources) > 0 { + for _, e := range m.APIResources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersions) Size() (n int) { + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CreateOptions) Size() (n int) { + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Duration) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Duration)) + return n +} + +func (m *ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *GetOptions) Size() (n int) { + var l int + _ = l + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersion) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionForDiscovery) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l 
= len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Initializer) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Initializers) Size() (n int) { + var l int + _ = l + if len(m.Pending) > 0 { + for _, e := range m.Pending { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListMeta) Size() (n int) { + var l int + _ = l + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + n += 1 + sovGenerated(uint64(m.Limit)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if 
len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *OwnerReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Controller != nil { + n += 2 + } + if m.BlockOwnerDeletion != nil { + n += 2 + } + return n +} + +func (m *Patch) Size() (n int) { + var l int + _ = l + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RootPaths) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Code)) + return n +} + +func (m *StatusCause) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Field) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatusDetails) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Timestamp) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UpdateOptions) Size() (n int) { + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m Verbs) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WatchEvent) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + 
+func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroup{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroupList{`, + `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResourceList{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateOptions{`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `}`, + }, "") + return s +} +func (this *Duration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Duration{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `}`, + }, "") + return s +} +func (this *ExportOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExportOptions{`, + `Export:` + fmt.Sprintf("%v", this.Export) + `,`, + `Exact:` 
+ fmt.Sprintf("%v", this.Exact) + `,`, + `}`, + }, "") + return s +} +func (this *GetOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *GroupVersionForDiscovery) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *Initializer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Initializers) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializers{`, + `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelector) String() string { + if this == nil { + return "nil" + } + keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) + for k := range this.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + mapStringForMatchLabels := "map[string]string{" + for _, k := range keysForMatchLabels { + mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k]) + } + mapStringForMatchLabels += "}" + s := strings.Join([]string{`&LabelSelector{`, + `MatchLabels:` + mapStringForMatchLabels + `,`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListMeta{`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, 
+ `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OwnerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OwnerReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Controller:` + valueToStringGenerated(this.Controller) + `,`, + `BlockOwnerDeletion:` + valueToStringGenerated(this.BlockOwnerDeletion) + `,`, + `}`, + }, "") + return s +} +func (this *Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Patch{`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *RootPaths) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootPaths{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func (this *ServerAddressByClientCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerAddressByClientCIDR{`, + 
`ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, + `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *StatusCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Field:` + fmt.Sprintf("%v", this.Field) + `,`, + `}`, + }, "") + return s +} +func (this *StatusDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusDetails{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *Timestamp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Timestamp{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateOptions{`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `}`, + }, "") + return s +} +func (this *WatchEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, GroupVersionForDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PreferredVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, APIGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Namespaced = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Verbs == nil { + m.Verbs = Verbs{} + } + if err := m.Verbs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersionHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIResources = append(m.APIResources, APIResource{}) + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
DeletionPropagation(dAtA[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pending = append(m.Pending, Initializer{}) + if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *LabelSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &Time{} + } + if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Initializers == nil { + m.Initializers = &Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &StatusDetails{} + } + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CauseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, StatusCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) + } + m.RetryAfterSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Verbs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Verbs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, + 0xf5, 0x4f, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa8, 0x9d, 0xfd, 0xff, 0xbd, 0x91, 0x88, 0xb3, + 0xbd, 0x68, 0x95, 0x85, 0x59, 0x9b, 0x64, 0x61, 0x35, 0x0c, 0x30, 0x90, 0x8e, 0x33, 0x43, 0xd8, + 0xc9, 0xc4, 0xaa, 0xec, 0x0c, 0x62, 0x18, 0x21, 0x3a, 0xdd, 
0x15, 0xa7, 0x49, 0xbb, 0xdb, 0x5b, + 0xd5, 0xce, 0x8c, 0xe1, 0xc0, 0x1e, 0x40, 0x80, 0x84, 0xd0, 0x1c, 0x39, 0xa1, 0x1d, 0xc1, 0x85, + 0x2b, 0x27, 0x4e, 0x9c, 0x90, 0x98, 0xe3, 0x4a, 0x5c, 0xf6, 0x80, 0xac, 0x9d, 0x70, 0xe0, 0x84, + 0xb8, 0xe7, 0x80, 0x50, 0x55, 0x57, 0x77, 0x57, 0xdb, 0xf1, 0xa4, 0xcd, 0x2c, 0x88, 0x53, 0xdc, + 0xef, 0xe3, 0xf7, 0x5e, 0x55, 0xbd, 0xfa, 0xd5, 0xab, 0x0a, 0xec, 0x9d, 0x5c, 0x63, 0x0d, 0xc7, + 0x6f, 0x9e, 0xf4, 0x0f, 0x09, 0xf5, 0x48, 0x40, 0x58, 0xf3, 0x94, 0x78, 0xb6, 0x4f, 0x9b, 0x52, + 0x61, 0xf6, 0x9c, 0xae, 0x69, 0x1d, 0x3b, 0x1e, 0xa1, 0x83, 0x66, 0xef, 0xa4, 0xc3, 0x05, 0xac, + 0xd9, 0x25, 0x81, 0xd9, 0x3c, 0xdd, 0x68, 0x76, 0x88, 0x47, 0xa8, 0x19, 0x10, 0xbb, 0xd1, 0xa3, + 0x7e, 0xe0, 0xa3, 0x4f, 0x87, 0x5e, 0x0d, 0xd5, 0xab, 0xd1, 0x3b, 0xe9, 0x70, 0x01, 0x6b, 0x70, + 0xaf, 0xc6, 0xe9, 0xc6, 0xca, 0x9b, 0x1d, 0x27, 0x38, 0xee, 0x1f, 0x36, 0x2c, 0xbf, 0xdb, 0xec, + 0xf8, 0x1d, 0xbf, 0x29, 0x9c, 0x0f, 0xfb, 0x47, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x21, 0xe8, 0xca, + 0xc4, 0x54, 0x68, 0xdf, 0x0b, 0x9c, 0x2e, 0x19, 0xcd, 0x62, 0xe5, 0xed, 0xcb, 0x1c, 0x98, 0x75, + 0x4c, 0xba, 0xe6, 0xa8, 0x9f, 0xfe, 0xa7, 0x3c, 0x94, 0xb6, 0xda, 0xbb, 0xb7, 0xa8, 0xdf, 0xef, + 0xa1, 0x35, 0x98, 0xf5, 0xcc, 0x2e, 0xa9, 0x69, 0x6b, 0xda, 0x7a, 0xd9, 0x98, 0x7f, 0x3a, 0xac, + 0xcf, 0x9c, 0x0d, 0xeb, 0xb3, 0x77, 0xcc, 0x2e, 0xc1, 0x42, 0x83, 0x5c, 0x28, 0x9d, 0x12, 0xca, + 0x1c, 0xdf, 0x63, 0xb5, 0xdc, 0x5a, 0x7e, 0xbd, 0xb2, 0x79, 0xa3, 0x91, 0x65, 0xfc, 0x0d, 0x11, + 0xe0, 0x5e, 0xe8, 0x7a, 0xd3, 0xa7, 0x2d, 0x87, 0x59, 0xfe, 0x29, 0xa1, 0x03, 0x63, 0x49, 0x46, + 0x29, 0x49, 0x25, 0xc3, 0x71, 0x04, 0xf4, 0x23, 0x0d, 0x96, 0x7a, 0x94, 0x1c, 0x11, 0x4a, 0x89, + 0x2d, 0xf5, 0xb5, 0xfc, 0x9a, 0xf6, 0x09, 0x84, 0xad, 0xc9, 0xb0, 0x4b, 0xed, 0x11, 0x7c, 0x3c, + 0x16, 0x11, 0xfd, 0x5a, 0x83, 0x15, 0x46, 0xe8, 0x29, 0xa1, 0x5b, 0xb6, 0x4d, 0x09, 0x63, 0xc6, + 0x60, 0xdb, 0x75, 0x88, 0x17, 0x6c, 0xef, 0xb6, 0x30, 0xab, 0xcd, 0x8a, 0x79, 0xf8, 0x6a, 0xb6, + 0x84, 0x0e, 0x26, 0xe1, 0x18, 0xba, 0xcc, 0x68, 0x65, 0xa2, 0x09, 0xc3, 0xcf, 0x49, 0x43, 0x3f, + 0x82, 0xf9, 0x68, 0x21, 0x6f, 0x3b, 0x2c, 0x40, 0xf7, 0xa0, 0xd8, 0xe1, 0x1f, 0xac, 0xa6, 0x89, + 0x04, 0x1b, 0xd9, 0x12, 0x8c, 0x30, 0x8c, 0x05, 0x99, 0x4f, 0x51, 0x7c, 0x32, 0x2c, 0xd1, 0xf4, + 0x9f, 0xcd, 0x42, 0x65, 0xab, 0xbd, 0x8b, 0x09, 0xf3, 0xfb, 0xd4, 0x22, 0x19, 0x8a, 0x66, 0x13, + 0x80, 0xff, 0x65, 0x3d, 0xd3, 0x22, 0x76, 0x2d, 0xb7, 0xa6, 0xad, 0x97, 0x0c, 0x24, 0xed, 0xe0, + 0x4e, 0xac, 0xc1, 0x8a, 0x15, 0x47, 0x3d, 0x71, 0x3c, 0x5b, 0xac, 0xb6, 0x82, 0xfa, 0x8e, 0xe3, + 0xd9, 0x58, 0x68, 0xd0, 0x6d, 0x28, 0x9c, 0x12, 0x7a, 0xc8, 0xe7, 0x9f, 0x17, 0xc4, 0x67, 0xb3, + 0x0d, 0xef, 0x1e, 0x77, 0x31, 0xca, 0x67, 0xc3, 0x7a, 0x41, 0xfc, 0xc4, 0x21, 0x08, 0x6a, 0x00, + 0xb0, 0x63, 0x9f, 0x06, 0x22, 0x9d, 0x5a, 0x61, 0x2d, 0xbf, 0x5e, 0x36, 0x16, 0x78, 0x7e, 0x07, + 0xb1, 0x14, 0x2b, 0x16, 0xe8, 0x1a, 0xcc, 0x33, 0xc7, 0xeb, 0xf4, 0x5d, 0x93, 0x72, 0x41, 0xad, + 0x28, 0xf2, 0xbc, 0x22, 0xf3, 0x9c, 0x3f, 0x50, 0x74, 0x38, 0x65, 0xc9, 0x23, 0x59, 0x66, 0x40, + 0x3a, 0x3e, 0x75, 0x08, 0xab, 0xcd, 0x25, 0x91, 0xb6, 0x63, 0x29, 0x56, 0x2c, 0xd0, 0x6b, 0x50, + 0x10, 0x33, 0x5f, 0x2b, 0x89, 0x10, 0x55, 0x19, 0xa2, 0x20, 0x96, 0x05, 0x87, 0x3a, 0xf4, 0x06, + 0xcc, 0xc9, 0x5d, 0x53, 0x2b, 0x0b, 0xb3, 0x45, 0x69, 0x36, 0x17, 0x95, 0x75, 0xa4, 0x47, 0xdf, + 0x00, 0xc4, 0x02, 0x9f, 0x9a, 0x1d, 0x22, 0x55, 0x5f, 0x37, 0xd9, 0x71, 0x0d, 0x84, 0xd7, 0x8a, + 0xf4, 0x42, 0x07, 0x63, 0x16, 0xf8, 0x02, 0x2f, 0xfd, 0x77, 0x1a, 0x2c, 0x2a, 0xb5, 
0x20, 0xea, + 0xee, 0x1a, 0xcc, 0x77, 0x94, 0x5d, 0x27, 0xeb, 0x22, 0x9e, 0x19, 0x75, 0x47, 0xe2, 0x94, 0x25, + 0x22, 0x50, 0xa6, 0x12, 0x29, 0x62, 0x97, 0x8d, 0xcc, 0x45, 0x1b, 0xe5, 0x90, 0x44, 0x52, 0x84, + 0x0c, 0x27, 0xc8, 0xfa, 0xdf, 0x34, 0x51, 0xc0, 0x11, 0xdf, 0xa0, 0x75, 0x85, 0xd3, 0x34, 0xb1, + 0x1c, 0xf3, 0x13, 0xf8, 0xe8, 0x12, 0x22, 0xc8, 0xfd, 0x4f, 0x10, 0xc1, 0xf5, 0xd2, 0x2f, 0x3f, + 0xa8, 0xcf, 0xbc, 0xff, 0x97, 0xb5, 0x19, 0xfd, 0x2d, 0xa8, 0x6e, 0x53, 0x62, 0x06, 0x64, 0xbf, + 0x17, 0x88, 0x01, 0xe8, 0x50, 0xb4, 0xe9, 0x00, 0xf7, 0x3d, 0x39, 0x50, 0xe0, 0xfb, 0xbb, 0x25, + 0x24, 0x58, 0x6a, 0xf4, 0x9f, 0xe4, 0xa1, 0xda, 0x22, 0x2e, 0x49, 0xbc, 0x6e, 0x02, 0xea, 0x50, + 0xd3, 0x22, 0x6d, 0x42, 0x1d, 0xdf, 0x3e, 0x20, 0x96, 0xef, 0xd9, 0x4c, 0xac, 0x6b, 0xde, 0xf8, + 0x3f, 0x5e, 0x2d, 0xb7, 0xc6, 0xb4, 0xf8, 0x02, 0x0f, 0xe4, 0x42, 0xb5, 0x47, 0xc5, 0x6f, 0x27, + 0x90, 0x27, 0x08, 0xdf, 0xb9, 0x6f, 0x65, 0x9b, 0xb0, 0xb6, 0xea, 0x6a, 0x2c, 0x9f, 0x0d, 0xeb, + 0xd5, 0x94, 0x08, 0xa7, 0xc1, 0xd1, 0xd7, 0x60, 0xc9, 0xa7, 0xbd, 0x63, 0xd3, 0x6b, 0x91, 0x1e, + 0xf1, 0x6c, 0xe2, 0x05, 0x4c, 0xb0, 0x49, 0xc9, 0xb8, 0xc2, 0x79, 0x7f, 0x7f, 0x44, 0x87, 0xc7, + 0xac, 0xd1, 0x7d, 0x58, 0xee, 0x51, 0xbf, 0x67, 0x76, 0x4c, 0x8e, 0xd8, 0xf6, 0x5d, 0xc7, 0x1a, + 0x08, 0xb6, 0x29, 0x1b, 0x57, 0xcf, 0x86, 0xf5, 0xe5, 0xf6, 0xa8, 0xf2, 0x7c, 0x58, 0x7f, 0x49, + 0x4c, 0x1d, 0x97, 0x24, 0x4a, 0x3c, 0x0e, 0xa3, 0xac, 0x44, 0x61, 0xe2, 0x4a, 0xec, 0x42, 0xa9, + 0xd5, 0xa7, 0xc2, 0x0b, 0x7d, 0x05, 0x4a, 0xb6, 0xfc, 0x2d, 0x67, 0xfe, 0xd5, 0xe8, 0xe0, 0x8c, + 0x6c, 0xce, 0x87, 0xf5, 0x2a, 0x3f, 0xea, 0x1b, 0x91, 0x00, 0xc7, 0x2e, 0xfa, 0x03, 0xa8, 0xee, + 0x3c, 0xea, 0xf9, 0x34, 0x88, 0xd6, 0xf4, 0x75, 0x28, 0x12, 0x21, 0x10, 0x68, 0xa5, 0x84, 0xed, + 0x43, 0x33, 0x2c, 0xb5, 0x9c, 0x7d, 0xc8, 0x23, 0xd3, 0x0a, 0x24, 0x6d, 0xc7, 0xec, 0xb3, 0xc3, + 0x85, 0x38, 0xd4, 0xe9, 0xfb, 0x00, 0xb7, 0x48, 0x0c, 0xbd, 0x05, 0x8b, 0xd1, 0x66, 0x4b, 0x73, + 0xc0, 0xff, 0x4b, 0xe7, 0x45, 0x9c, 0x56, 0xe3, 0x51, 0x7b, 0xfd, 0x01, 0x94, 0x05, 0x4f, 0x70, + 0xba, 0x4f, 0x08, 0x50, 0x7b, 0x0e, 0x01, 0x46, 0xe7, 0x45, 0x6e, 0xd2, 0x79, 0xa1, 0x6c, 0x0b, + 0x17, 0xaa, 0xa1, 0x6f, 0x74, 0x84, 0x65, 0x8a, 0x70, 0x15, 0x4a, 0x51, 0x9a, 0x32, 0x4a, 0xdc, + 0xba, 0x44, 0x40, 0x38, 0xb6, 0x50, 0xa2, 0x1d, 0x43, 0x8a, 0xf3, 0xb2, 0x05, 0x53, 0xf8, 0x3c, + 0xf7, 0x7c, 0x3e, 0x57, 0x22, 0xfd, 0x10, 0x6a, 0x93, 0xfa, 0x9d, 0x17, 0x60, 0xe5, 0xec, 0xa9, + 0xe8, 0xbf, 0xd0, 0x60, 0x49, 0x45, 0xca, 0xbe, 0x7c, 0xd9, 0x83, 0x5c, 0xde, 0x19, 0x28, 0x33, + 0xf2, 0x2b, 0x0d, 0xae, 0xa4, 0x86, 0x36, 0xd5, 0x8a, 0x4f, 0x91, 0x94, 0x5a, 0x1c, 0xf9, 0x29, + 0x8a, 0xa3, 0x09, 0x95, 0x5d, 0xcf, 0x09, 0x1c, 0xd3, 0x75, 0xbe, 0x4f, 0xe8, 0xe5, 0xbd, 0x94, + 0xfe, 0x07, 0x0d, 0xe6, 0x15, 0x0f, 0x86, 0x1e, 0xc0, 0x1c, 0x27, 0x2c, 0xc7, 0xeb, 0xc8, 0x3e, + 0x2f, 0xe3, 0x91, 0xa9, 0x80, 0x24, 0xe3, 0x6a, 0x87, 0x48, 0x38, 0x82, 0x44, 0x6d, 0x28, 0x52, + 0xc2, 0xfa, 0x6e, 0x20, 0xb9, 0xfa, 0x6a, 0xc6, 0xc3, 0x2d, 0x30, 0x83, 0x3e, 0x0b, 0x49, 0x0d, + 0x0b, 0x7f, 0x2c, 0x71, 0xf4, 0x3f, 0xe7, 0xa0, 0x7a, 0xdb, 0x3c, 0x24, 0xee, 0x01, 0x71, 0x89, + 0x15, 0xf8, 0x14, 0xfd, 0x00, 0x2a, 0x5d, 0x33, 0xb0, 0x8e, 0x85, 0x34, 0xea, 0x56, 0x5b, 0xd9, + 0x02, 0xa5, 0x90, 0x1a, 0x7b, 0x09, 0xcc, 0x8e, 0x17, 0xd0, 0x81, 0xf1, 0x92, 0x1c, 0x58, 0x45, + 0xd1, 0x60, 0x35, 0x9a, 0xb8, 0x62, 0x88, 0xef, 0x9d, 0x47, 0x3d, 0x7e, 0x94, 0x4e, 0x7f, 0xb3, + 0x49, 0xa5, 0x80, 0xc9, 0x7b, 0x7d, 0x87, 0x92, 0x2e, 0xf1, 0x82, 0xe4, 0x8a, 0xb1, 0x37, 0x82, + 0x8f, 0xc7, 
0x22, 0xae, 0xdc, 0x80, 0xa5, 0xd1, 0xe4, 0xd1, 0x12, 0xe4, 0x4f, 0xc8, 0x20, 0xac, + 0x05, 0xcc, 0x7f, 0xa2, 0x2b, 0x50, 0x38, 0x35, 0xdd, 0xbe, 0xe4, 0x1f, 0x1c, 0x7e, 0x5c, 0xcf, + 0x5d, 0xd3, 0xf4, 0xdf, 0x68, 0x50, 0x9b, 0x94, 0x08, 0xfa, 0x94, 0x02, 0x64, 0x54, 0x64, 0x56, + 0xf9, 0x77, 0xc8, 0x20, 0x44, 0xdd, 0x81, 0x92, 0xdf, 0xe3, 0x97, 0x42, 0x9f, 0xca, 0x3a, 0x7f, + 0x23, 0xaa, 0xdd, 0x7d, 0x29, 0x3f, 0x1f, 0xd6, 0x5f, 0x4e, 0xc1, 0x47, 0x0a, 0x1c, 0xbb, 0xf2, + 0x13, 0x4d, 0xe4, 0xc3, 0x4f, 0xd9, 0xf8, 0x44, 0xbb, 0x27, 0x24, 0x58, 0x6a, 0xf4, 0xdf, 0x6b, + 0x30, 0x2b, 0x9a, 0xc4, 0x07, 0x50, 0xe2, 0xf3, 0x67, 0x9b, 0x81, 0x29, 0xf2, 0xca, 0x7c, 0x3d, + 0xe1, 0xde, 0x7b, 0x24, 0x30, 0x93, 0xfd, 0x15, 0x49, 0x70, 0x8c, 0x88, 0x30, 0x14, 0x9c, 0x80, + 0x74, 0xa3, 0x85, 0x7c, 0x73, 0x22, 0xb4, 0xbc, 0x1c, 0x37, 0xb0, 0xf9, 0x70, 0xe7, 0x51, 0x40, + 0x3c, 0xbe, 0x18, 0x09, 0x19, 0xec, 0x72, 0x0c, 0x1c, 0x42, 0xe9, 0xbf, 0xd5, 0x20, 0x0e, 0xc5, + 0xb7, 0x3b, 0x23, 0xee, 0xd1, 0x6d, 0xc7, 0x3b, 0x91, 0xd3, 0x1a, 0xa7, 0x73, 0x20, 0xe5, 0x38, + 0xb6, 0xb8, 0xe8, 0x40, 0xcc, 0x4d, 0x77, 0x20, 0xf2, 0x80, 0x96, 0xef, 0x05, 0x8e, 0xd7, 0x1f, + 0xe3, 0x97, 0x6d, 0x29, 0xc7, 0xb1, 0x85, 0xfe, 0xcf, 0x1c, 0x54, 0x78, 0xae, 0xd1, 0x89, 0xfc, + 0x25, 0xa8, 0xba, 0xea, 0xea, 0xc9, 0x9c, 0x5f, 0x96, 0x10, 0xe9, 0xfd, 0x88, 0xd3, 0xb6, 0xdc, + 0xf9, 0xc8, 0x21, 0xae, 0x1d, 0x3b, 0xe7, 0xd2, 0xce, 0x37, 0x55, 0x25, 0x4e, 0xdb, 0x72, 0x9e, + 0x7d, 0xc8, 0xeb, 0x5a, 0x76, 0x5e, 0xf1, 0xd4, 0x7e, 0x93, 0x0b, 0x71, 0xa8, 0xbb, 0x68, 0x7e, + 0x66, 0xa7, 0x9c, 0x9f, 0xeb, 0xb0, 0xc0, 0x17, 0xd2, 0xef, 0x07, 0x51, 0x7b, 0x5a, 0x10, 0x4d, + 0x12, 0x3a, 0x1b, 0xd6, 0x17, 0xde, 0x4d, 0x69, 0xf0, 0x88, 0x25, 0xcf, 0xd1, 0x75, 0xba, 0x4e, + 0x50, 0x9b, 0x13, 0x2e, 0x71, 0x8e, 0xb7, 0xb9, 0x10, 0x87, 0xba, 0xd4, 0x02, 0x94, 0x2e, 0x5d, + 0x80, 0xf7, 0xa0, 0xbc, 0xe7, 0x58, 0xd4, 0xe7, 0x91, 0xf9, 0x31, 0xc2, 0x52, 0x3d, 0x73, 0x4c, + 0xb7, 0x51, 0x46, 0x91, 0x9e, 0xa7, 0xe2, 0x99, 0x9e, 0x1f, 0x76, 0xc6, 0x85, 0x24, 0x95, 0x3b, + 0x5c, 0x88, 0x43, 0xdd, 0xf5, 0x2b, 0xfc, 0xf4, 0xf8, 0xe9, 0x93, 0xfa, 0xcc, 0xe3, 0x27, 0xf5, + 0x99, 0x0f, 0x9e, 0xc8, 0x93, 0xe4, 0xef, 0x00, 0xb0, 0x7f, 0xf8, 0x3d, 0x62, 0x85, 0x15, 0x7a, + 0xf9, 0xad, 0x9c, 0x77, 0x04, 0xf2, 0x31, 0x48, 0xdc, 0x60, 0x73, 0x23, 0x1d, 0x81, 0xa2, 0xc3, + 0x29, 0x4b, 0xd4, 0x84, 0x72, 0x7c, 0x53, 0x97, 0xd5, 0xb8, 0x2c, 0xdd, 0xca, 0xf1, 0x75, 0x1e, + 0x27, 0x36, 0xa9, 0xed, 0x32, 0x7b, 0xe9, 0x76, 0x31, 0x20, 0xdf, 0x77, 0x6c, 0xb1, 0x80, 0x65, + 0xe3, 0x73, 0x11, 0x5d, 0xdd, 0xdd, 0x6d, 0x9d, 0x0f, 0xeb, 0xaf, 0x4e, 0x7a, 0xe6, 0x0a, 0x06, + 0x3d, 0xc2, 0x1a, 0x77, 0x77, 0x5b, 0x98, 0x3b, 0x5f, 0x54, 0x52, 0xc5, 0x29, 0x4b, 0x6a, 0x13, + 0x40, 0x8e, 0x9a, 0x7b, 0x87, 0xb5, 0x11, 0xbf, 0x5a, 0xdc, 0x8a, 0x35, 0x58, 0xb1, 0x42, 0x0c, + 0x96, 0x2d, 0x7e, 0xe1, 0x72, 0x7c, 0x8f, 0x2f, 0x3d, 0x0b, 0xcc, 0x6e, 0x78, 0x6f, 0xaf, 0x6c, + 0x7e, 0x26, 0x1b, 0xbf, 0x71, 0x37, 0xe3, 0x15, 0x19, 0x66, 0x79, 0x7b, 0x14, 0x0c, 0x8f, 0xe3, + 0x23, 0x1f, 0x96, 0x6d, 0x79, 0xe9, 0x48, 0x82, 0x96, 0xa7, 0x0e, 0xfa, 0x32, 0x0f, 0xd8, 0x1a, + 0x05, 0xc2, 0xe3, 0xd8, 0xe8, 0x3b, 0xb0, 0x12, 0x09, 0xc7, 0x6f, 0x7e, 0xe2, 0x25, 0x21, 0x6f, + 0xac, 0xf2, 0x0b, 0x6c, 0x6b, 0xa2, 0x15, 0x7e, 0x0e, 0x02, 0xb2, 0xa1, 0xe8, 0x86, 0xbd, 0x40, + 0x45, 0xf0, 0xf7, 0x97, 0xb3, 0x8d, 0x22, 0xa9, 0xfe, 0x86, 0xda, 0x03, 0xc4, 0x37, 0x1b, 0x79, + 0xfc, 0x4b, 0x6c, 0xf4, 0x08, 0x2a, 0xa6, 0xe7, 0xf9, 0x81, 0x19, 0xde, 0x45, 0xe7, 0x45, 0xa8, + 0xad, 0xa9, 0x43, 0x6d, 0x25, 0x18, 
0x23, 0x3d, 0x87, 0xa2, 0xc1, 0x6a, 0x28, 0xf4, 0x10, 0x16, + 0xfd, 0x87, 0x1e, 0xa1, 0x98, 0x1c, 0x11, 0x4a, 0x3c, 0x8b, 0xb0, 0x5a, 0x55, 0x44, 0xff, 0x7c, + 0xc6, 0xe8, 0x29, 0xe7, 0xa4, 0xa4, 0xd3, 0x72, 0x86, 0x47, 0xa3, 0xa0, 0x06, 0xc0, 0x91, 0xe3, + 0xc9, 0xce, 0xb1, 0xb6, 0x90, 0x3c, 0x3d, 0xdd, 0x8c, 0xa5, 0x58, 0xb1, 0x40, 0x5f, 0x80, 0x8a, + 0xe5, 0xf6, 0x59, 0x40, 0xc2, 0x37, 0xae, 0x45, 0xb1, 0x83, 0xe2, 0xf1, 0x6d, 0x27, 0x2a, 0xac, + 0xda, 0xa1, 0x63, 0x98, 0x77, 0x94, 0x16, 0xb5, 0xb6, 0x24, 0x6a, 0x71, 0x73, 0xea, 0xbe, 0x94, + 0x19, 0x4b, 0x9c, 0x89, 0x54, 0x09, 0x4e, 0x21, 0xaf, 0x7c, 0x11, 0x2a, 0xff, 0x66, 0xc7, 0xc4, + 0x3b, 0xae, 0xd1, 0xa5, 0x9b, 0xaa, 0xe3, 0xfa, 0x63, 0x0e, 0x16, 0xd2, 0x13, 0x1e, 0xdf, 0x4c, + 0xb4, 0x89, 0x6f, 0x96, 0x11, 0x2b, 0xe7, 0x27, 0xb2, 0xb2, 0x24, 0xbf, 0xd9, 0x17, 0x21, 0xbf, + 0x4d, 0x00, 0xb3, 0xe7, 0x44, 0xbc, 0x17, 0xf2, 0x68, 0xcc, 0x5c, 0xc9, 0xcb, 0x17, 0x56, 0xac, + 0xc4, 0xab, 0xa4, 0xef, 0x05, 0xd4, 0x77, 0x5d, 0x42, 0x05, 0x57, 0x96, 0xe4, 0xab, 0x64, 0x2c, + 0xc5, 0x8a, 0x05, 0xba, 0x09, 0xe8, 0xd0, 0xf5, 0xad, 0x13, 0x31, 0x05, 0xd1, 0x3e, 0x17, 0x2c, + 0x59, 0x0a, 0xdf, 0x84, 0x8c, 0x31, 0x2d, 0xbe, 0xc0, 0x43, 0x9f, 0x83, 0x42, 0x9b, 0x37, 0x01, + 0xfa, 0x3e, 0xa4, 0x9f, 0x73, 0xd0, 0x8d, 0x70, 0x26, 0xb4, 0xf8, 0xbd, 0x65, 0xba, 0x59, 0xd0, + 0xaf, 0x42, 0x19, 0xfb, 0x7e, 0xd0, 0x36, 0x83, 0x63, 0x86, 0xea, 0x50, 0xe8, 0xf1, 0x1f, 0xf2, + 0xdd, 0x4b, 0xbc, 0xff, 0x0a, 0x0d, 0x0e, 0xe5, 0xfa, 0xcf, 0x35, 0x78, 0x65, 0xe2, 0x7b, 0x1b, + 0x9f, 0x51, 0x2b, 0xfe, 0x92, 0x29, 0xc5, 0x33, 0x9a, 0xd8, 0x61, 0xc5, 0x8a, 0xf7, 0x4d, 0xa9, + 0x47, 0xba, 0xd1, 0xbe, 0x29, 0x15, 0x0d, 0xa7, 0x6d, 0xf5, 0x7f, 0xe4, 0xa0, 0x18, 0x5e, 0xa2, + 0xfe, 0xc3, 0xad, 0xf2, 0xeb, 0x50, 0x64, 0x22, 0x8e, 0x4c, 0x2f, 0x66, 0xcb, 0x30, 0x3a, 0x96, + 0x5a, 0xde, 0xc4, 0x74, 0x09, 0x63, 0x66, 0x27, 0x2a, 0xde, 0xb8, 0x89, 0xd9, 0x0b, 0xc5, 0x38, + 0xd2, 0xa3, 0xb7, 0xf9, 0x9d, 0xd1, 0x64, 0x71, 0x17, 0xb7, 0x1a, 0x41, 0x62, 0x21, 0x3d, 0x1f, + 0xd6, 0xe7, 0x25, 0xb8, 0xf8, 0xc6, 0xd2, 0x1a, 0xdd, 0x87, 0x39, 0x9b, 0x04, 0xa6, 0xe3, 0x86, + 0xcd, 0x5b, 0xe6, 0x87, 0xc1, 0x10, 0xac, 0x15, 0xba, 0x1a, 0x15, 0x9e, 0x93, 0xfc, 0xc0, 0x11, + 0x20, 0xdf, 0x78, 0x96, 0x6f, 0x87, 0xcf, 0xf4, 0x85, 0x64, 0xe3, 0x6d, 0xfb, 0x36, 0xc1, 0x42, + 0xa3, 0x3f, 0xd6, 0xa0, 0x12, 0x22, 0x6d, 0x9b, 0x7d, 0x46, 0xd0, 0x46, 0x3c, 0x8a, 0x70, 0xb9, + 0xa3, 0x33, 0x79, 0xf6, 0xdd, 0x41, 0x8f, 0x9c, 0x0f, 0xeb, 0x65, 0x61, 0xc6, 0x3f, 0xe2, 0x01, + 0x28, 0x73, 0x94, 0xbb, 0x64, 0x8e, 0x5e, 0x83, 0x82, 0x68, 0x94, 0xe5, 0x64, 0xc6, 0x8d, 0x9e, + 0x68, 0xa6, 0x71, 0xa8, 0xd3, 0x3f, 0xce, 0x41, 0x35, 0x35, 0xb8, 0x0c, 0x5d, 0x5d, 0xfc, 0xb0, + 0x91, 0xcb, 0xf0, 0x58, 0x36, 0xf9, 0x9f, 0x2b, 0xdf, 0x82, 0xa2, 0xc5, 0xc7, 0x17, 0xfd, 0x77, + 0x6b, 0x63, 0x9a, 0xa5, 0x10, 0x33, 0x93, 0x54, 0x92, 0xf8, 0x64, 0x58, 0x02, 0xa2, 0x5b, 0xb0, + 0x4c, 0x49, 0x40, 0x07, 0x5b, 0x47, 0x01, 0xa1, 0x6a, 0xb7, 0x5e, 0x48, 0xfa, 0x1e, 0x3c, 0x6a, + 0x80, 0xc7, 0x7d, 0x22, 0xaa, 0x2c, 0xbe, 0x00, 0x55, 0xea, 0x2e, 0xcc, 0xfe, 0x17, 0x7b, 0xf4, + 0x6f, 0x43, 0x39, 0xe9, 0xa2, 0x3e, 0xe1, 0x90, 0xfa, 0x77, 0xa1, 0xc4, 0xab, 0x31, 0xea, 0xfe, + 0x2f, 0x39, 0x89, 0xd2, 0x67, 0x44, 0x2e, 0xcb, 0x19, 0xa1, 0xbf, 0x05, 0xd5, 0xbb, 0x3d, 0x7b, + 0xca, 0x7f, 0x27, 0x6c, 0x42, 0xf8, 0x8f, 0x36, 0x4e, 0xc1, 0xe1, 0xa5, 0x5c, 0xa1, 0x60, 0xf5, + 0x86, 0xad, 0xbc, 0x8a, 0xfd, 0x58, 0x03, 0x10, 0x37, 0xc4, 0x9d, 0x53, 0xe2, 0x05, 0x7c, 0x34, + 0x7c, 0xd9, 0x46, 0x47, 0x23, 0xf6, 0x9e, 0xd0, 0xa0, 0xbb, 
0x50, 0xf4, 0x45, 0x4b, 0x26, 0x9f, + 0xa9, 0xa6, 0xbc, 0xf1, 0xc7, 0xa5, 0x1a, 0xf6, 0x75, 0x58, 0x82, 0x19, 0xeb, 0x4f, 0x9f, 0xad, + 0xce, 0x7c, 0xf8, 0x6c, 0x75, 0xe6, 0xa3, 0x67, 0xab, 0x33, 0xef, 0x9f, 0xad, 0x6a, 0x4f, 0xcf, + 0x56, 0xb5, 0x0f, 0xcf, 0x56, 0xb5, 0x8f, 0xce, 0x56, 0xb5, 0x8f, 0xcf, 0x56, 0xb5, 0xc7, 0x7f, + 0x5d, 0x9d, 0xb9, 0x9f, 0x3b, 0xdd, 0xf8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x3d, 0xb9, + 0xdd, 0x54, 0x20, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto new file mode 100644 index 000000000000..47d8495f1c19 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -0,0 +1,879 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +message APIGroup { + // name is the name of the group. + optional string name = 1; + + // versions are the versions supported in this group. + repeated GroupVersionForDiscovery versions = 2; + + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + optional GroupVersionForDiscovery preferredVersion = 3; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +optional + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +message APIGroupList { + // groups is a list of APIGroup. + repeated APIGroup groups = 1; +} + +// APIResource specifies the name of a resource and whether it is namespaced. +message APIResource { + // name is the plural name of the resource. + optional string name = 1; + + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. 
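+  // For example (illustrative), `kubectl get deployment foo` and
+  // `kubectl get deployments foo` address the same resource.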
+ optional string singularName = 6; + + // namespaced indicates if a resource is namespaced or not. + optional bool namespaced = 2; + + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + optional string group = 8; + + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + optional string version = 9; + + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + optional string kind = 3; + + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + optional Verbs verbs = 4; + + // shortNames is a list of suggested short names of the resource. + repeated string shortNames = 5; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + repeated string categories = 7; + + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + optional string storageVersionHash = 10; +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +message APIResourceList { + // groupVersion is the group and version this APIResourceList is for. + optional string groupVersion = 1; + + // resources contains the name of the resources and if they are namespaced. + repeated APIResource resources = 2; +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message APIVersions { + // versions are the api versions that are available. + repeated string versions = 1; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; +} + +// CreateOptions may be provided when creating an API object. +message CreateOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. 
Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; +} + +// DeleteOptions may be provided when deleting an API object. +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + // +optional + optional string propagationPolicy = 4; + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 5; +} + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +message Duration { + optional int64 duration = 1; +} + +// ExportOptions is the query options to the standard REST get call. +message ExportOptions { + // Should this value be exported. Export strips fields that a user can not specify. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + optional bool exact = 2; +} + +// GetOptions is the standard query options to the standard REST get call. +message GetOptions { + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + optional string resourceVersion = 1; +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupKind { + optional string group = 1; + + optional string kind = 2; +} + +// GroupResource specifies a Group and a Resource, but does not force a version. 
This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupResource { + optional string group = 1; + + optional string resource = 2; +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersion { + optional string group = 1; + + optional string version = 2; +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +message GroupVersionForDiscovery { + // groupVersion specifies the API group and version in the form "group/version" + optional string groupVersion = 1; + + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + optional string version = 2; +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionKind { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionResource { + optional string group = 1; + + optional string version = 2; + + optional string resource = 3; +} + +// Initializer is information about an initializer that has not yet completed. +message Initializer { + // name of the process that is responsible for initializing this object. + optional string name = 1; +} + +// Initializers tracks the progress of initialization. +message Initializers { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + // +patchMergeKey=name + // +patchStrategy=merge + repeated Initializer pending = 1; + + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + optional Status result = 2; +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + map matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. 
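+  // For example (illustrative), a requirement with key "tier", operator "In"
+  // and values ["frontend", "backend"] matches objects whose "tier" label is
+  // set to either value.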
+ // +patchMergeKey=key + // +patchStrategy=merge + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + repeated string values = 3; +} + +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +message ListMeta { + // selfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 1; + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 2; + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // consistent list may not be possible if the server configuration has changed or more than a few + // minutes have passed. The resourceVersion field returned when using this continue value will be + // identical to the value in the first response, unless you have received this token from an error + // message. + optional string continue = 3; +} + +// ListOptions is the query options to a standard REST list call. +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. 
+ // This limits the duration of the call, regardless of any activity or inactivity. + // +optional + optional int64 timeoutSeconds = 5; + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + optional int64 limit = 7; + + // The continue option should be set when retrieving more results from the server. Since this value is + // server defined, clients may only use the continue value from a previous query result with identical + // query parameters (except for the value of continue) and the server may reject a continue value it + // does not recognize. If the specified continue value is no longer valid whether due to expiration + // (generally five to fifteen minutes) or a configuration change on the server, the server will + // respond with a 410 ResourceExpired error together with a continue token. If the client needs a + // consistent list, it must restart their list without the continue field. Otherwise, the client may + // send another list request with the token received with the 410 error, the server will respond with + // a list starting from the next key, but from the latest snapshot, which is inconsistent from the + // previous list results - objects that are created, modified, or deleted after the first list request + // will be included in the response, as long as their keys are after the "next key". + // + // This field is not supported when watch is true. Clients may start a watch from the last + // resourceVersion value returned by the server and not miss any modifications. + optional string continue = 8; +} + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message MicroTime { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. 
Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. 
+ // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + map labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + map annotations = 12; + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + repeated OwnerReference ownerReferences = 13; + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. 
If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + // + // DEPRECATED - initializers are an alpha field and will be removed in v1.15. + optional Initializers initializers = 16; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + // +patchStrategy=merge + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// OwnerReference contains enough information to let you identify an owning +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. +message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + optional string uid = 4; + + // If true, this reference points to the managing controller. + // +optional + optional bool controller = 6; + + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + optional bool blockOwnerDeletion = 7; +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +message Patch { +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +message RootPaths { + // paths are the paths available at root. + repeated string paths = 1; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. 
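+  // For example (illustrative): "10.0.0.1:6443" or "api.internal:443".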
+ optional string serverAddress = 2; +} + +// Status is a return value for calls that don't return other objects. +message Status { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional string status = 2; + + // A human-readable description of the status of this operation. + // +optional + optional string message = 3; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + optional string reason = 4; + + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + optional StatusDetails details = 5; + + // Suggested HTTP return code for this status, 0 if not set. + // +optional + optional int32 code = 6; +} + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +message StatusCause { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + optional string reason = 1; + + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + optional string message = 2; + + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + optional string field = 3; +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +message StatusDetails { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + optional string name = 1; + + // The group attribute of the resource associated with the status StatusReason. + // +optional + optional string group = 2; + + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 3; + + // UID of the resource. + // (when there is a single resource which can be described). 
+ // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 6; + + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + repeated StatusCause causes = 4; + + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. + // +optional + optional int32 retryAfterSeconds = 5; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +message TypeMeta { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // +optional + optional string apiVersion = 2; +} + +// UpdateOptions may be provided when updating an API object. +message UpdateOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. 
Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Verbs { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Event represents a single event to a watched resource. +// +// +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message WatchEvent { + optional string type = 1; + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go new file mode 100644 index 000000000000..bd4c6d9b5861 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -0,0 +1,148 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupResource struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionResource struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Resource string `json:"resource" protobuf:"bytes,3,opt,name=resource"` +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupKind struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionKind struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersion struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// MarshalJSON implements the json.Marshaller interface. +func (gv GroupVersion) MarshalJSON() ([]byte, error) { + s := gv.String() + if strings.Count(s, "/") > 1 { + return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) + } + return json.Marshal(s) +} + +func (gv *GroupVersion) unmarshal(value []byte) error { + var s string + if err := json.Unmarshal(value, &s); err != nil { + return err + } + parsed, err := schema.ParseGroupVersion(s) + if err != nil { + return err + } + gv.Group, gv.Version = parsed.Group, parsed.Version + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (gv *GroupVersion) UnmarshalJSON(value []byte) error { + return gv.unmarshal(value) +} + +// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. +func (gv *GroupVersion) UnmarshalText(value []byte) error { + return gv.unmarshal(value) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go new file mode 100644 index 000000000000..604129ea101d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -0,0 +1,246 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" +) + +// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements +// labels.Selector +// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go +func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { + if ps == nil { + return labels.Nothing(), nil + } + if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { + return labels.Everything(), nil + } + selector := labels.NewSelector() + for k, v := range ps.MatchLabels { + r, err := labels.NewRequirement(k, selection.Equals, []string{v}) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + for _, expr := range ps.MatchExpressions { + var op selection.Operator + switch expr.Operator { + case LabelSelectorOpIn: + op = selection.In + case LabelSelectorOpNotIn: + op = selection.NotIn + case LabelSelectorOpExists: + op = selection.Exists + case LabelSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the +// original structure of a label selector. Operators that cannot be converted into plain +// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in +// an error. +func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { + if ps == nil { + return nil, nil + } + selector := map[string]string{} + for k, v := range ps.MatchLabels { + selector[k] = v + } + for _, expr := range ps.MatchExpressions { + switch expr.Operator { + case LabelSelectorOpIn: + if len(expr.Values) != 1 { + return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) + } + // Should we do anything in case this will override a previous key-value pair? + selector[expr.Key] = expr.Values[0] + case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: + return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) + default: + return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) + } + } + return selector, nil +} + +// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
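+// For example (illustrative), ParseToLabelSelector("app=nginx,tier in (frontend)")
+// yields MatchLabels {"app": "nginx"} and one MatchExpressions entry with
+// Key "tier", Operator In and Values ["frontend"].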
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go +func ParseToLabelSelector(selector string) (*LabelSelector, error) { + reqs, err := labels.ParseToRequirements(selector) + if err != nil { + return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) + } + + labelSelector := &LabelSelector{ + MatchLabels: map[string]string{}, + MatchExpressions: []LabelSelectorRequirement{}, + } + for _, req := range reqs { + var op LabelSelectorOperator + switch req.Operator() { + case selection.Equals, selection.DoubleEquals: + vals := req.Values() + if vals.Len() != 1 { + return nil, fmt.Errorf("equals operator must have exactly one value") + } + val, ok := vals.PopAny() + if !ok { + return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") + } + labelSelector.MatchLabels[req.Key()] = val + continue + case selection.In: + op = LabelSelectorOpIn + case selection.NotIn: + op = LabelSelectorOpNotIn + case selection.Exists: + op = LabelSelectorOpExists + case selection.DoesNotExist: + op = LabelSelectorOpDoesNotExist + case selection.GreaterThan, selection.LessThan: + // Adding a separate case for these operators to indicate that this is deliberate + return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) + default: + return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) + } + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ + Key: req.Key(), + Operator: op, + Values: req.Values().List(), + }) + } + return labelSelector, nil +} + +// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. +func SetAsLabelSelector(ls labels.Set) *LabelSelector { + if ls == nil { + return nil + } + + selector := &LabelSelector{ + MatchLabels: make(map[string]string), + } + for label, value := range ls { + selector.MatchLabels[label] = value + } + + return selector +} + +// FormatLabelSelector convert labelSelector into plain string +func FormatLabelSelector(labelSelector *LabelSelector) string { + selector, err := LabelSelectorAsSelector(labelSelector) + if err != nil { + return "" + } + + l := selector.String() + if len(l) == 0 { + l = "" + } + return l +} + +func ExtractGroupVersions(l *APIGroupList) []string { + var groupVersions []string + for _, g := range l.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + return groupVersions +} + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name).String(), + ResourceVersion: meta.ResourceVersion, + } +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. 
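+//
+// For example (illustrative):
+//
+//	opts := NewDeleteOptions(30) // request deletion with a 30-second grace period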
+func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. +func HasObjectMetaSystemFieldValues(meta Object) bool { + return !meta.GetCreationTimestamp().Time.IsZero() || + len(meta.GetUID()) != 0 +} + +// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields +// for a pre-existing object. This is opt-in for new objects with Status subresource. +func ResetObjectMetaForStatus(meta, existingMeta Object) { + meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp()) + meta.SetGeneration(existingMeta.GetGeneration()) + meta.SetSelfLink(existingMeta.GetSelfLink()) + meta.SetLabels(existingMeta.GetLabels()) + meta.SetAnnotations(existingMeta.GetAnnotations()) + meta.SetFinalizers(existingMeta.GetFinalizers()) + meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go new file mode 100644 index 000000000000..9b45145da669 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// Clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. +func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := selector.DeepCopy() + + if newSelector.MatchLabels == nil { + newSelector.MatchLabels = make(map[string]string) + } + + newSelector.MatchLabels[labelKey] = labelValue + + return newSelector +} + +// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. +func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. 
+ return selector + } + if selector.MatchLabels == nil { + selector.MatchLabels = make(map[string]string) + } + selector.MatchLabels[labelKey] = labelValue + return selector +} + +// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels +func SelectorHasLabel(selector *LabelSelector, labelKey string) bool { + return len(selector.MatchLabels[labelKey]) > 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go new file mode 100644 index 000000000000..ee1447541fcd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -0,0 +1,170 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// TODO: move this, Object, List, and Type to a different package +type ObjectMetaAccessor interface { + GetObjectMeta() Object +} + +// Object lets you work with object metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. +type Object interface { + GetNamespace() string + SetNamespace(namespace string) + GetName() string + SetName(name string) + GetGenerateName() string + SetGenerateName(name string) + GetUID() types.UID + SetUID(uid types.UID) + GetResourceVersion() string + SetResourceVersion(version string) + GetGeneration() int64 + SetGeneration(generation int64) + GetSelfLink() string + SetSelfLink(selfLink string) + GetCreationTimestamp() Time + SetCreationTimestamp(timestamp Time) + GetDeletionTimestamp() *Time + SetDeletionTimestamp(timestamp *Time) + GetDeletionGracePeriodSeconds() *int64 + SetDeletionGracePeriodSeconds(*int64) + GetLabels() map[string]string + SetLabels(labels map[string]string) + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + GetInitializers() *Initializers + SetInitializers(initializers *Initializers) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []OwnerReference + SetOwnerReferences([]OwnerReference) + GetClusterName() string + SetClusterName(clusterName string) +} + +// ListMetaAccessor retrieves the list interface from an object +type ListMetaAccessor interface { + GetListMeta() ListInterface +} + +// Common lets you work with core metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type Common interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) +} + +// ListInterface lets you work with list metadata from any of the versioned or +// internal API objects. 
Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type ListInterface interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) + GetContinue() string + SetContinue(c string) +} + +// Type exposes the type and APIVersion of versioned or internal API objects. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type Type interface { + GetAPIVersion() string + SetAPIVersion(version string) + GetKind() string + SetKind(kind string) +} + +func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ListMeta) GetContinue() string { return meta.Continue } +func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ListMeta) GetListMeta() ListInterface { return obj } + +func (obj *ObjectMeta) GetObjectMeta() Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. 
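+//
+// As a rough usage sketch (the "Widget" type is hypothetical, not part of this
+// package), any API struct that embeds ObjectMeta picks up these accessors and
+// therefore satisfies the Object interface:
+//
+//	type Widget struct {
+//		TypeMeta   `json:",inline"`
+//		ObjectMeta `json:"metadata,omitempty"`
+//	}
+//	var _ Object = &Widget{}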
+func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation } +func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds } +func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } +func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } +func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } +func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { + meta.OwnerReferences = references +} +func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } +func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go new file mode 100644 index 000000000000..6f6c5111bc82 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -0,0 +1,183 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/google/gofuzz"
+)
+
+const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
+
+// MicroTime is a version of Time with microsecond-level precision.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type MicroTime struct {
+	time.Time `protobuf:"-"`
+}
+
+// DeepCopyInto writes a deep copy of the MicroTime value into out. The underlying
+// time.Time type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *MicroTime) DeepCopyInto(out *MicroTime) {
+	*out = *t
+}
+
+// String returns the representation of the time.
+func (t MicroTime) String() string {
+	return t.Time.String()
+}
+
+// NewMicroTime returns a wrapped instance of the provided time.
+func NewMicroTime(time time.Time) MicroTime {
+	return MicroTime{time}
+}
+
+// DateMicro returns the MicroTime corresponding to the supplied parameters
+// by wrapping time.Date.
+func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime {
+	return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// NowMicro returns the current local time.
+func NowMicro() MicroTime {
+	return MicroTime{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *MicroTime) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *MicroTime) Before(u *MicroTime) bool {
+	return t.Time.Before(u.Time)
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *MicroTime) Equal(u *MicroTime) bool {
+	return t.Time.Equal(u.Time)
+}
+
+// BeforeTime reports whether the time instant t is before second-level precision u.
+func (t *MicroTime) BeforeTime(u *Time) bool {
+	return t.Time.Before(u.Time)
+}
+
+// EqualTime reports whether the time instant t is equal to second-level precision u.
+func (t *MicroTime) EqualTime(u *Time) bool {
+	return t.Time.Equal(u.Time)
+}
+
+// UnixMicro returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func UnixMicro(sec int64, nsec int64) MicroTime {
+	return MicroTime{time.Unix(sec, nsec)}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
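+// For example, a MicroTime is expected on the wire as an RFC3339 string with
+// microsecond precision such as "2006-01-02T15:04:05.000000Z" (see RFC3339Micro
+// above); the JSON literal null decodes to the zero time.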
+func (t *MicroTime) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *MicroTime) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t MicroTime) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(RFC3339Micro)) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" } + +// MarshalQueryParameter converts to a URL query parameter value +func (t MicroTime) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(RFC3339Micro), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) +} + +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go new file mode 100644 index 000000000000..14841be512ae --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is declared in time_proto.go + +// Timestamp returns the Time as a new Timestamp value. +func (m *MicroTime) ProtoMicroTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. 
+func (m *MicroTime) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoMicroTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *MicroTime) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + return nil +} + +// Marshal implements the protobuf marshalling interface. +func (m *MicroTime) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoMicroTime().Marshal() +} + +// MarshalTo implements the protobuf marshalling interface. +func (m *MicroTime) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalTo(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go new file mode 100644 index 000000000000..0827729d087f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -0,0 +1,97 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Unversioned is group version for unversioned API objects +// TODO: this should be v1 probably +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} + +// WatchEventKind is name reserved for serializing watch events. +const WatchEventKind = "WatchEvent" + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddToGroupVersion registers common meta types into schemas. 
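+// A minimal calling sketch (the group and version shown are hypothetical):
+//
+//	s := runtime.NewScheme()
+//	AddToGroupVersion(s, schema.GroupVersion{Group: "example.dev", Version: "v1"})
+//
+// after which ListOptions, DeleteOptions, WatchEvent, and friends are registered
+// for that group version.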
+func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { + scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) + scheme.AddKnownTypeWithName( + schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), + &InternalEvent{}, + ) + // Supports legacy code paths, most callers should use metav1.ParameterCodec for now + scheme.AddKnownTypes(groupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + ) + utilruntime.Must(scheme.AddConversionFuncs( + Convert_v1_WatchEvent_To_watch_Event, + Convert_v1_InternalEvent_To_v1_WatchEvent, + Convert_watch_Event_To_v1_WatchEvent, + Convert_v1_WatchEvent_To_v1_InternalEvent, + )) + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned, + &Status{}, + &APIVersions{}, + &APIGroupList{}, + &APIGroup{}, + &APIResourceList{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(AddConversionFuncs(scheme)) + utilruntime.Must(RegisterDefaults(scheme)) +} + +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + scheme.AddUnversionedTypes(SchemeGroupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(RegisterDefaults(scheme)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go new file mode 100644 index 000000000000..efff656e1091 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -0,0 +1,185 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" + + "github.com/google/gofuzz" +) + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Time struct { + time.Time `protobuf:"-"` +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// String returns the representation of the time. 
+func (t Time) String() string { + return t.Time.String() +} + +// NewTime returns a wrapped instance of the provided time +func NewTime(time time.Time) Time { + return Time{time} +} + +// Date returns the Time corresponding to the supplied parameters +// by wrapping time.Date. +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// Now returns the current local time. +func Now() Time { + return Time{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t *Time) Before(u *Time) bool { + return t.Time.Before(u.Time) +} + +// Equal reports whether the time instant t is equal to u. +func (t *Time) Equal(u *Time) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false +} + +// Unix returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func Unix(sec int64, nsec int64) Time { + return Time{time.Unix(sec, nsec)} +} + +// Rfc3339Copy returns a copy of the Time at second-level precision. +func (t Time) Rfc3339Copy() Time { + copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) + return Time{copied} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *Time) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(time.RFC3339)) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Time) OpenAPISchemaFormat() string { return "date-time" } + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. 
Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) +} + +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go new file mode 100644 index 000000000000..ed72186b49fe --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"` +} + +// Timestamp returns the Time as a new Timestamp value. +func (m *Time) ProtoTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. +func (m *Time) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *Time) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + m.Time = time.Unix(p.Seconds, int64(0)).Local() + return nil +} + +// Marshal implements the protobuf marshalling interface. +func (m *Time) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoTime().Marshal() +} + +// MarshalTo implements the protobuf marshalling interface. 
+func (m *Time) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalTo(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go new file mode 100644 index 000000000000..f390bf02fc4f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -0,0 +1,1011 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API types that are common to all versions. +// +// The package contains two categories of types: +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// In the future, we will probably move these categories of objects into +// separate packages. +package v1 + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // selfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. 
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
+
+	// continue may be set if the user set a limit on the number of items returned, and indicates that
+	// the server has more data available. The value is opaque and may be used to issue another request
+	// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
+	// consistent list may not be possible if the server configuration has changed or more than a few
+	// minutes have passed. The resourceVersion field returned when using this continue value will be
+	// identical to the value in the first response, unless you have received this token from an error
+	// message.
+	Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
+}
+
+// These are internal finalizer values for Kubernetes-like APIs; finalizers must be qualified names unless defined here.
+const (
+	FinalizerOrphanDependents string = "orphan"
+	FinalizerDeleteDependents string = "foregroundDeletion"
+)
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta struct {
+	// Name must be unique within a namespace. Is required when creating resources, although
+	// some resources may allow a client to request the generation of an appropriate name
+	// automatically. Name is primarily intended for creation idempotence and configuration
+	// definition.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// GenerateName is an optional prefix, used by the server, to generate a unique
+	// name ONLY IF the Name field has not been provided.
+	// If this field is used, the name returned to the client will be different
+	// than the name passed. This value will also be combined with a unique suffix.
+	// The provided value has the same validation rules as the Name field,
+	// and may be truncated by the length of the suffix required to make the value
+	// unique on the server.
+	//
+	// If this field is specified and the generated name exists, the server will
+	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+	// should retry (optionally after the time indicated in the Retry-After header).
+	//
+	// Applied only if Name is not specified.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+	// +optional
+	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
+
+	// Namespace defines the space within which each name must be unique. An empty namespace is
+	// equivalent to the "default" namespace, but "default" is the canonical representation.
+	// Not all objects are required to be scoped to a namespace - the value of this field for
+	// those objects will be empty.
+	//
+	// Must be a DNS_LABEL.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/namespaces
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+
+	// SelfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	// +optional
+	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
+
+	// UID is the unique in time and space value for this object. It is typically generated by
+	// the server on successful creation of a resource and is not allowed to change on PUT
+	// operations.
+	//
+	// Populated by the system.
+	// Read-only.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+
+	// An opaque value that represents the internal version of this object that can
+	// be used by clients to determine when objects have changed. May be used for optimistic
+	// concurrency, change detection, and the watch operation on a resource or set of resources.
+	// Clients must treat these values as opaque and pass them unmodified back to the server.
+	// They may only be valid for a particular resource or set of resources.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Value must be treated as opaque by clients and passed unmodified back to the server.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+	// A sequence number representing a specific generation of the desired state.
+	// Populated by the system. Read-only.
+	// +optional
+	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
+
+	// CreationTimestamp is a timestamp representing the server time when this object was
+	// created. It is not guaranteed to be set in happens-before order across separate operations.
+	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Null for lists.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
+
+	// DeletionTimestamp is the RFC 3339 date and time at which this resource will be deleted. This
+	// field is set by the server when a graceful deletion is requested by the user, and is not
+	// directly settable by a client. The resource is expected to be deleted (no longer visible
+	// from resource lists, and not reachable by name) after the time in this field, once the
+	// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+	// Once the deletionTimestamp is set, this value may not be unset or be set further into the
+	// future, although it may be shortened or the resource may be deleted prior to this time.
+	// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+	// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+	// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+	// remove the pod from the API. In the presence of network partitions, this object may still
+	// exist after this timestamp, until an administrator or automated process can determine the
+	// resource is fully terminated.
+	// If not set, graceful deletion of the object has not been requested.
+	//
+	// Populated by the system when a graceful deletion is requested.
+	// Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + // + // DEPRECATED - initializers are an alpha field and will be removed in v1.15. + Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + // +patchStrategy=merge + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
+ // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +// Initializers tracks the progress of initialization. +type Initializers struct { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + // +patchMergeKey=name + // +patchStrategy=merge + Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` +} + +// Initializer is information about an initializer that has not yet completed. +type Initializer struct { + // name of the process that is responsible for initializing this object. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" +) + +// OwnerReference contains enough information to let you identify an owning +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // If true, this reference points to the managing controller. + // +optional + Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. 
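+//
+// As a sketch of the intended chunked-list flow (see the Limit and Continue field
+// comments below): issue the first request with Limit set, then repeat the
+// otherwise-identical request with Continue set to the token returned in the
+// previous ListMeta, until that token comes back empty.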
+type ListOptions struct { + TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + + // +k8s:deprecated=includeUninitialized,protobuf=6 + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // This limits the duration of the call, regardless of any activity or inactivity. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"` + // The continue option should be set when retrieving more results from the server. Since this value is + // server defined, clients may only use the continue value from a previous query result with identical + // query parameters (except for the value of continue) and the server may reject a continue value it + // does not recognize. 
If the specified continue value is no longer valid whether due to expiration
+	// (generally five to fifteen minutes) or a configuration change on the server, the server will
+	// respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+	// consistent list, it must restart its list without the continue field. Otherwise, the client may
+	// send another list request with the token received with the 410 error, the server will respond with
+	// a list starting from the next key, but from the latest snapshot, which is inconsistent with the
+	// previous list results - objects that are created, modified, or deleted after the first list request
+	// will be included in the response, as long as their keys are after the "next key".
+	//
+	// This field is not supported when watch is true. Clients may start a watch from the last
+	// resourceVersion value returned by the server and not miss any modifications.
+	Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExportOptions is the query options to the standard REST get call.
+type ExportOptions struct {
+	TypeMeta `json:",inline"`
+	// Should this value be exported. Export strips fields that a user can not specify.
+	Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
+	// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
+	Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GetOptions is the standard query options to the standard REST get call.
+type GetOptions struct {
+	TypeMeta `json:",inline"`
+	// When specified:
+	// - if unset, then the result is returned from remote storage based on quorum-read flag;
+	// - if it's 0, then we simply return what we currently have in cache, no guarantee;
+	// - if set to non zero, then the result is at least as fresh as given rv.
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"`
+	// +k8s:deprecated=includeUninitialized,protobuf=2
+}
+
+// DeletionPropagation decides if a deletion will propagate to the dependents of
+// the object, and how the garbage collector will handle the propagation.
+type DeletionPropagation string
+
+const (
+	// Orphans the dependents.
+	DeletePropagationOrphan DeletionPropagation = "Orphan"
+	// Deletes the object from the key-value store, the garbage collector will
+	// delete the dependents in the background.
+	DeletePropagationBackground DeletionPropagation = "Background"
+	// The object exists in the key-value store until the garbage collector
+	// deletes all the dependents whose ownerReference.blockOwnerDeletion=true
+	// from the key-value store. The API server will put the "foregroundDeletion"
+	// finalizer on the object, and set its deletionTimestamp. This policy is
+	// cascading, i.e., the dependents will be deleted with Foreground.
+	DeletePropagationForeground DeletionPropagation = "Foreground"
+)
+
+const (
+	// DryRunAll means to complete all processing stages, but don't
+	// persist changes to storage.
+	DryRunAll = "All"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeleteOptions may be provided when deleting an API object.
+type DeleteOptions struct {
+	TypeMeta `json:",inline"`
+
+	// The duration in seconds before the object should be deleted. Value must be a non-negative integer.
+ // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + // +optional + PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CreateOptions may be provided when creating an API object. +type CreateOptions struct { + TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + // +k8s:deprecated=includeUninitialized,protobuf=2 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UpdateOptions may be provided when updating an API object. +type UpdateOptions struct { + TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. 
+ // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Status is a return value for calls that don't return other objects. +type Status struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // A human-readable description of the status of this operation. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` + // Suggested HTTP return code for this status, 0 if not set. + // +optional + Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +type StatusDetails struct { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // The group attribute of the resource associated with the status StatusReason. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. 
+ // +optional + Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. + // +optional + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` +} + +// Values of Status.Status +const ( + StatusSuccess = "Success" + StatusFailure = "Failure" +) + +// StatusReason is an enumeration of possible failure causes. Each StatusReason +// must map to a single HTTP status code, but multiple reasons may map +// to the same HTTP status code. +// TODO: move to apiserver +type StatusReason string + +const ( + // StatusReasonUnknown means the server has declined to indicate a specific reason. + // The details field may contain other information about this error. + // Status code 500. + StatusReasonUnknown StatusReason = "" + + // StatusReasonUnauthorized means the server can be reached and understood the request, but requires + // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) + // in order for the action to be completed. If the user has specified credentials on the request, the + // server considers them insufficient. + // Status code 401 + StatusReasonUnauthorized StatusReason = "Unauthorized" + + // StatusReasonForbidden means the server can be reached and understood the request, but refuses + // to take any further action. It is the result of the server being configured to deny access for some reason + // to the requested resource by the client. + // Details (optional): + // "kind" string - the kind attribute of the forbidden resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the forbidden resource + // Status code 403 + StatusReasonForbidden StatusReason = "Forbidden" + + // StatusReasonNotFound means one or more resources required for this operation + // could not be found. + // Details (optional): + // "kind" string - the kind attribute of the missing resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the missing resource + // Status code 404 + StatusReasonNotFound StatusReason = "NotFound" + + // StatusReasonAlreadyExists means the resource you are creating already exists. + // Details (optional): + // "kind" string - the kind attribute of the conflicting resource + // "id" string - the identifier of the conflicting resource + // Status code 409 + StatusReasonAlreadyExists StatusReason = "AlreadyExists" + + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. + // Status code 409 + StatusReasonConflict StatusReason = "Conflict" + + // StatusReasonGone means the item is no longer available at the server and no + // forwarding address is known. + // Status code 410 + StatusReasonGone StatusReason = "Gone" + + // StatusReasonInvalid means the requested create or update operation cannot be + // completed due to invalid data provided as part of the request. The client may + // need to alter the request. 
When set, the client may use the StatusDetails + // message field as a summary of the issues encountered. + // Details (optional): + // "kind" string - the kind attribute of the invalid resource + // "id" string - the identifier of the invalid resource + // "causes" - one or more StatusCause entries indicating the data in the + // provided resource that was invalid. The code, message, and + // field attributes will be set. + // Status code 422 + StatusReasonInvalid StatusReason = "Invalid" + + // StatusReasonServerTimeout means the server can be reached and understood the request, + // but cannot complete the action in a reasonable time. The client should retry the request. + // This is may be due to temporary server load or a transient communication issue with + // another server. Status code 500 is used because the HTTP spec provides no suitable + // server-requested client retry and the 5xx class represents actionable errors. + // Details (optional): + // "kind" string - the kind attribute of the resource being acted on. + // "id" string - the operation that is being attempted. + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 500 + StatusReasonServerTimeout StatusReason = "ServerTimeout" + + // StatusReasonTimeout means that the request could not be completed within the given time. + // Clients can get this response only when they specified a timeout param in the request, + // or if the server cannot complete the operation within a reasonable amount of time. + // The request might succeed with an increased value of timeout param. The client *should* + // wait at least the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 504 + StatusReasonTimeout StatusReason = "Timeout" + + // StatusReasonTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. A client may + // always retry the request that led to this error, although the client should wait at least + // the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 429 + StatusReasonTooManyRequests StatusReason = "TooManyRequests" + + // StatusReasonBadRequest means that the request itself was invalid, because the request + // doesn't make any sense, for example deleting a read-only object. This is different than + // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the + // data was invalid. API calls that return BadRequest can never succeed. + StatusReasonBadRequest StatusReason = "BadRequest" + + // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the + // resource was not supported by the code - for instance, attempting to delete a resource that + // can only be created. API calls that return MethodNotAllowed can never succeed. + StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + + // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable + // to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml. + // API calls that return NotAcceptable can never succeed. 
+ // Status code 406 + StatusReasonNotAcceptable StatusReason = "NotAcceptable" + + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable + // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. + // API calls that return UnsupportedMediaType can never succeed. + // Status code 415 + StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType" + + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected + // and the outcome of the call is unknown. + // Details (optional): + // "causes" - The original error + // Status code 500 + StatusReasonInternalError StatusReason = "InternalError" + + // StatusReasonExpired indicates that the request is invalid because the content you are requesting + // has expired and is no longer available. It is typically associated with watches that can't be + // serviced. + // Status code 410 (gone) + StatusReasonExpired StatusReason = "Expired" + + // StatusReasonServiceUnavailable means that the request itself was valid, + // but the requested service is unavailable at this time. + // Retrying the request after some time might succeed. + // Status code 503 + StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" +) + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +type StatusCause struct { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` +} + +// CauseType is a machine readable value providing more detail about what +// occurred in a status response. An operation may have multiple causes for a +// status (whether Failure or Success). +type CauseType string + +const ( + // CauseTypeFieldValueNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). + CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" + // CauseTypeFieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + CauseTypeFieldValueRequired CauseType = "FieldValueRequired" + // CauseTypeFieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" + // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex + // match). 
+ CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" + // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) + // values that can not be handled (e.g. an enumerated string). + CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client + // without the expected return type. The presence of this cause indicates the error may be + // due to an intervening proxy or the server software malfunctioning. + CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type APIVersions struct { + TypeMeta `json:",inline"` + // versions are the api versions that are available. + Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +type APIGroupList struct { + TypeMeta `json:",inline"` + // groups is a list of APIGroup. + Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +type APIGroup struct { + TypeMeta `json:",inline"` + // name is the name of the group. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // versions are the versions supported in this group. + Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` + // a map of client CIDR to server address that is serving this group. 
+ // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +optional + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +type GroupVersionForDiscovery struct { + // groupVersion specifies the API group and version in the form "group/version" + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// APIResource specifies the name of a resource and whether it is namespaced. +type APIResource struct { + // name is the plural name of the resource. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"` + // namespaced indicates if a resource is namespaced or not. + Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"` + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"` + // kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo') + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"` +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Verbs []string + +func (vs Verbs) String() string { + return fmt.Sprintf("%v", []string(vs)) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +type APIResourceList struct { + TypeMeta `json:",inline"` + // groupVersion is the group and version this APIResourceList is for. + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // resources contains the name of the resources and if they are namespaced. + APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +type RootPaths struct { + // paths are the paths available at root. + Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` +} + +// TODO: remove me when watch is refactored +func LabelSelectorQueryParam(version string) string { + return "labelSelector" +} + +// TODO: remove me when watch is refactored +func FieldSelectorQueryParam(version string) string { + return "fieldSelector" +} + +// String returns available api versions as a human-friendly version string. +func (apiVersions APIVersions) String() string { + return strings.Join(apiVersions.Versions, ",") +} + +func (apiVersions APIVersions) GoString() string { + return apiVersions.String() +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +type Patch struct{} + +// Note: +// There are two different styles of label selectors used in versioned types: +// an older style which is represented as just a string in versioned types, and a +// newer style that is structured. LabelSelector is an internal representation for the +// latter style. + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. 
+type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. +type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..14a17f1bf57b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -0,0 +1,348 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
+var map_APIGroup = map[string]string{ + "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "name": "name is the name of the group.", + "versions": "versions are the versions supported in this group.", + "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIGroup) SwaggerDoc() map[string]string { + return map_APIGroup +} + +var map_APIGroupList = map[string]string{ + "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "groups": "groups is a list of APIGroup.", +} + +func (APIGroupList) SwaggerDoc() map[string]string { + return map_APIGroupList +} + +var map_APIResource = map[string]string{ + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. 
This field will remain optional even if it graduates.", +} + +func (APIResource) SwaggerDoc() map[string]string { + return map_APIResource +} + +var map_APIResourceList = map[string]string{ + "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "groupVersion": "groupVersion is the group and version this APIResourceList is for.", + "resources": "resources contains the name of the resources and if they are namespaced.", +} + +func (APIResourceList) SwaggerDoc() map[string]string { + return map_APIResourceList +} + +var map_APIVersions = map[string]string{ + "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "versions": "versions are the api versions that are available.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIVersions) SwaggerDoc() map[string]string { + return map_APIVersions +} + +var map_CreateOptions = map[string]string{ + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", +} + +func (CreateOptions) SwaggerDoc() map[string]string { + return map_CreateOptions +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_ExportOptions = map[string]string{ + "": "ExportOptions is the query options to the standard REST get call.", + "export": "Should this value be exported. Export strips fields that a user can not specify.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", +} + +func (ExportOptions) SwaggerDoc() map[string]string { + return map_ExportOptions +} + +var map_GetOptions = map[string]string{ + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", +} + +func (GetOptions) SwaggerDoc() map[string]string { + return map_GetOptions +} + +var map_GroupVersionForDiscovery = map[string]string{ + "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", + "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", +} + +func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { + return map_GroupVersionForDiscovery +} + +var map_Initializer = map[string]string{ + "": "Initializer is information about an initializer that has not yet completed.", + "name": "name of the process that is responsible for initializing this object.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_Initializers = map[string]string{ + "": "Initializers tracks the progress of initialization.", + "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", + "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", +} + +func (Initializers) SwaggerDoc() map[string]string { + return map_Initializers +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + +var map_ListMeta = map[string]string{ + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", +} + +func (ListMeta) SwaggerDoc() map[string]string { + return map_ListMeta +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. 
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_OwnerReference = map[string]string{ + "": "OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "apiVersion": "API version of the referent.", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "controller": "If true, this reference points to the managing controller.", + "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", +} + +func (OwnerReference) SwaggerDoc() map[string]string { + return map_OwnerReference +} + +var map_Patch = map[string]string{ + "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", +} + +func (Patch) SwaggerDoc() map[string]string { + return map_Patch +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_RootPaths = map[string]string{ + "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", + "paths": "paths are the paths available at root.", +} + +func (RootPaths) SwaggerDoc() map[string]string { + return map_RootPaths +} + +var map_ServerAddressByClientCIDR = map[string]string{ + "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", +} + +func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { + return map_ServerAddressByClientCIDR +} + +var map_Status = map[string]string{ + "": "Status is a return value for calls that don't return other objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "message": "A human-readable description of the status of this operation.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "details": "Extended data associated with the reason. Each reason may define its own extended details. 
This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "code": "Suggested HTTP return code for this status, 0 if not set.", +} + +func (Status) SwaggerDoc() map[string]string { + return map_Status +} + +var map_StatusCause = map[string]string{ + "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", +} + +func (StatusCause) SwaggerDoc() map[string]string { + return map_StatusCause +} + +var map_StatusDetails = map[string]string{ + "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "group": "The group attribute of the resource associated with the status StatusReason.", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", +} + +func (StatusDetails) SwaggerDoc() map[string]string { + return map_StatusDetails +} + +var map_TypeMeta = map[string]string{ + "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", +} + +func (TypeMeta) SwaggerDoc() map[string]string { + return map_TypeMeta +} + +var map_UpdateOptions = map[string]string{ + "": "UpdateOptions may be provided when updating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", +} + +func (UpdateOptions) SwaggerDoc() map[string]string { + return map_UpdateOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go new file mode 100644 index 000000000000..58f077380334 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -0,0 +1,89 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Event represents a single event to a watched resource. +// +// +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WatchEvent struct { + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
+ Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` +} + +func Convert_watch_Event_To_v1_WatchEvent(in *watch.Event, out *WatchEvent, s conversion.Scope) error { + out.Type = string(in.Type) + switch t := in.Object.(type) { + case *runtime.Unknown: + // TODO: handle other fields on Unknown and detect type + out.Object.Raw = t.Raw + case nil: + default: + out.Object.Object = in.Object + } + return nil +} + +func Convert_v1_InternalEvent_To_v1_WatchEvent(in *InternalEvent, out *WatchEvent, s conversion.Scope) error { + return Convert_watch_Event_To_v1_WatchEvent((*watch.Event)(in), out, s) +} + +func Convert_v1_WatchEvent_To_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error { + out.Type = watch.EventType(in.Type) + if in.Object.Object != nil { + out.Object = in.Object.Object + } else if in.Object.Raw != nil { + // TODO: handle other fields on Unknown and detect type + out.Object = &runtime.Unknown{ + Raw: in.Object.Raw, + ContentType: runtime.ContentTypeJSON, + } + } + return nil +} + +func Convert_v1_WatchEvent_To_v1_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error { + return Convert_v1_WatchEvent_To_watch_Event(in, (*watch.Event)(out), s) +} + +// InternalEvent makes watch.Event versioned +// +protobuf=false +type InternalEvent watch.Event + +func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *InternalEvent) DeepCopyObject() runtime.Object { + if c := e.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..10845993e26b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -0,0 +1,961 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroup) DeepCopyInto(out *APIGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) + } + out.PreferredVersion = in.PreferredVersion + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroup. 
+func (in *APIGroup) DeepCopy() *APIGroup { + if in == nil { + return nil + } + out := new(APIGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupList) DeepCopyInto(out *APIGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupList. +func (in *APIGroupList) DeepCopy() *APIGroupList { + if in == nil { + return nil + } + out := new(APIGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResource) DeepCopyInto(out *APIResource) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource. +func (in *APIResource) DeepCopy() *APIResource { + if in == nil { + return nil + } + out := new(APIResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceList) DeepCopyInto(out *APIResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceList. +func (in *APIResourceList) DeepCopy() *APIResourceList { + if in == nil { + return nil + } + out := new(APIResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIVersions) DeepCopyInto(out *APIVersions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions. +func (in *APIVersions) DeepCopy() *APIVersions { + if in == nil { + return nil + } + out := new(APIVersions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIVersions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateOptions) DeepCopyInto(out *CreateOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateOptions. +func (in *CreateOptions) DeepCopy() *CreateOptions { + if in == nil { + return nil + } + out := new(CreateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CreateOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + (*in).DeepCopyInto(*out) + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. +func (in *DeleteOptions) DeepCopy() *DeleteOptions { + if in == nil { + return nil + } + out := new(DeleteOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Duration) DeepCopyInto(out *Duration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Duration. 
+func (in *Duration) DeepCopy() *Duration { + if in == nil { + return nil + } + out := new(Duration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportOptions) DeepCopyInto(out *ExportOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportOptions. +func (in *ExportOptions) DeepCopy() *ExportOptions { + if in == nil { + return nil + } + out := new(ExportOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExportOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GetOptions) DeepCopyInto(out *GetOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetOptions. +func (in *GetOptions) DeepCopy() *GetOptions { + if in == nil { + return nil + } + out := new(GetOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GetOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupKind) DeepCopyInto(out *GroupKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupKind. +func (in *GroupKind) DeepCopy() *GroupKind { + if in == nil { + return nil + } + out := new(GroupKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { + return nil + } + out := new(GroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersion) DeepCopyInto(out *GroupVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersion. +func (in *GroupVersion) DeepCopy() *GroupVersion { + if in == nil { + return nil + } + out := new(GroupVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionForDiscovery) DeepCopyInto(out *GroupVersionForDiscovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionForDiscovery. 
+func (in *GroupVersionForDiscovery) DeepCopy() *GroupVersionForDiscovery { + if in == nil { + return nil + } + out := new(GroupVersionForDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionKind) DeepCopyInto(out *GroupVersionKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKind. +func (in *GroupVersionKind) DeepCopy() *GroupVersionKind { + if in == nil { + return nil + } + out := new(GroupVersionKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializer) DeepCopyInto(out *Initializer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { + if in == nil { + return nil + } + out := new(Initializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializers) DeepCopyInto(out *Initializers) { + *out = *in + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]Initializer, len(*in)) + copy(*out, *in) + } + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(Status) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. +func (in *Initializers) DeepCopy() *Initializers { + if in == nil { + return nil + } + out := new(Initializers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { + *out = *in + if in.Object != nil { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalEvent. +func (in *InternalEvent) DeepCopy() *InternalEvent { + if in == nil { + return nil + } + out := new(InternalEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabelSelector) DeepCopyInto(out *LabelSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector. +func (in *LabelSelector) DeepCopy() *LabelSelector { + if in == nil { + return nil + } + out := new(LabelSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement. +func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { + if in == nil { + return nil + } + out := new(LabelSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListMeta) DeepCopyInto(out *ListMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta. +func (in *ListMeta) DeepCopy() *ListMeta { + if in == nil { + return nil + } + out := new(ListMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. +func (in *MicroTime) DeepCopy() *MicroTime { + if in == nil { + return nil + } + out := new(MicroTime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + *out = new(Initializers) + (*in).DeepCopyInto(*out) + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerReference) DeepCopyInto(out *OwnerReference) { + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(bool) + **out = **in + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference. +func (in *OwnerReference) DeepCopy() *OwnerReference { + if in == nil { + return nil + } + out := new(OwnerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Patch) DeepCopyInto(out *Patch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch. +func (in *Patch) DeepCopy() *Patch { + if in == nil { + return nil + } + out := new(Patch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. 
+func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { + return nil + } + out := new(Preconditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootPaths) DeepCopyInto(out *RootPaths) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootPaths. +func (in *RootPaths) DeepCopy() *RootPaths { + if in == nil { + return nil + } + out := new(RootPaths) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR. +func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { + if in == nil { + return nil + } + out := new(ServerAddressByClientCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(StatusDetails) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Status) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusCause) DeepCopyInto(out *StatusCause) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCause. +func (in *StatusCause) DeepCopy() *StatusCause { + if in == nil { + return nil + } + out := new(StatusCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusDetails) DeepCopyInto(out *StatusDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]StatusCause, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDetails. +func (in *StatusDetails) DeepCopy() *StatusDetails { + if in == nil { + return nil + } + out := new(StatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Timestamp) DeepCopyInto(out *Timestamp) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp. +func (in *Timestamp) DeepCopy() *Timestamp { + if in == nil { + return nil + } + out := new(Timestamp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateOptions) DeepCopyInto(out *UpdateOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateOptions. +func (in *UpdateOptions) DeepCopy() *UpdateOptions { + if in == nil { + return nil + } + out := new(UpdateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UpdateOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Verbs) DeepCopyInto(out *Verbs) { + { + in := &in + *out = make(Verbs, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Verbs. +func (in Verbs) DeepCopy() Verbs { + if in == nil { + return nil + } + out := new(Verbs) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchEvent) DeepCopyInto(out *WatchEvent) { + *out = *in + in.Object.DeepCopyInto(&out.Object) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent. +func (in *WatchEvent) DeepCopy() *WatchEvent { + if in == nil { + return nil + } + out := new(WatchEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WatchEvent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go new file mode 100644 index 000000000000..cce2e603a69a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. 
+// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go new file mode 100644 index 000000000000..bc615dc3ace0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -0,0 +1,898 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +type typePair struct { + source reflect.Type + dest reflect.Type +} + +type typeNamePair struct { + fieldType reflect.Type + fieldName string +} + +// DebugLogger allows you to get debugging messages if necessary. +type DebugLogger interface { + Logf(format string, args ...interface{}) +} + +type NameFunc func(t reflect.Type) string + +var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } + +// ConversionFunc converts the object a into the object b, reusing arrays or objects +// or pointers if necessary. It should return an error if the object cannot be converted +// or if some data is invalid. If you do not wish a and b to share fields or nested +// objects, you must copy a before calling this function. +type ConversionFunc func(a, b interface{}, scope Scope) error + +// Converter knows how to convert one type to another. +type Converter struct { + // Map from the conversion pair to a function which can + // do the conversion. + conversionFuncs ConversionFuncs + generatedConversionFuncs ConversionFuncs + + // Set of conversions that should be treated as a no-op + ignoredConversions map[typePair]struct{} + + // This is a map from a source field type and name, to a list of destination + // field type and name. + structFieldDests map[typeNamePair][]typeNamePair + + // Allows for the opposite lookup of structFieldDests. So that SourceFromDest + // copy flag also works. So this is a map of destination field name, to potential + // source field name and type to look for. + structFieldSources map[typeNamePair][]typeNamePair + + // Map from an input type to a function which can apply a key name mapping + inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc + + // Map from an input type to a set of default conversion flags. + inputDefaultFlags map[reflect.Type]FieldMatchingFlags + + // If non-nil, will be called to print helpful debugging info. Quite verbose. + Debug DebugLogger + + // nameFunc is called to retrieve the name of a type; this name is used for the + // purpose of deciding whether two types match or not (i.e., will we attempt to + // do a conversion). The default returns the go type name. + nameFunc func(t reflect.Type) string +} + +// NewConverter creates a new Converter object. 
+func NewConverter(nameFn NameFunc) *Converter { + c := &Converter{ + conversionFuncs: NewConversionFuncs(), + generatedConversionFuncs: NewConversionFuncs(), + ignoredConversions: make(map[typePair]struct{}), + nameFunc: nameFn, + structFieldDests: make(map[typeNamePair][]typeNamePair), + structFieldSources: make(map[typeNamePair][]typeNamePair), + + inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), + inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), + } + c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte) + return c +} + +// WithConversions returns a Converter that is a copy of c but with the additional +// fns merged on top. +func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { + copied := *c + copied.conversionFuncs = c.conversionFuncs.Merge(fns) + return &copied +} + +// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. +func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { + return c.inputDefaultFlags[t], &Meta{ + KeyNameMapping: c.inputFieldMappingFuncs[t], + } +} + +// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte +func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { + if *in == nil { + *out = nil + return nil + } + *out = make([]byte, len(*in)) + copy(*out, *in) + return nil +} + +// Scope is passed to conversion funcs to allow them to continue an ongoing conversion. +// If multiple converters exist in the system, Scope will allow you to use the correct one +// from a conversion function--that is, the one your conversion function was called by. +type Scope interface { + // Call Convert to convert sub-objects. Note that if you call it with your own exact + // parameters, you'll run out of stack space before anything useful happens. + Convert(src, dest interface{}, flags FieldMatchingFlags) error + + // DefaultConvert performs the default conversion, without calling a conversion func + // on the current stack frame. This makes it safe to call from a conversion func. + DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error + + // SrcTags and DestTags contain the struct tags that src and dest had, respectively. + // If the enclosing object was not a struct, then these will contain no tags, of course. + SrcTag() reflect.StructTag + DestTag() reflect.StructTag + + // Flags returns the flags with which the conversion was started. + Flags() FieldMatchingFlags + + // Meta returns any information originally passed to Convert. + Meta() *Meta +} + +// FieldMappingFunc can convert an input field value into different values, depending on +// the value of the source or destination struct tags. +type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) + +func NewConversionFuncs() ConversionFuncs { + return ConversionFuncs{ + fns: make(map[typePair]reflect.Value), + untyped: make(map[typePair]ConversionFunc), + } +} + +type ConversionFuncs struct { + fns map[typePair]reflect.Value + untyped map[typePair]ConversionFunc +} + +// Add adds the provided conversion functions to the lookup table - they must have the signature +// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override +// previously registered pairs. 
+func (c ConversionFuncs) Add(fns ...interface{}) error { + for _, fn := range fns { + fv := reflect.ValueOf(fn) + ft := fv.Type() + if err := verifyConversionFunctionSignature(ft); err != nil { + return err + } + c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv + } + return nil +} + +// AddUntyped adds the provided conversion function to the lookup table for the types that are +// supplied as a and b. a and b must be pointers or an error is returned. This method overwrites +// previously defined functions. +func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error { + tA, tB := reflect.TypeOf(a), reflect.TypeOf(b) + if tA.Kind() != reflect.Ptr { + return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a) + } + if tB.Kind() != reflect.Ptr { + return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b) + } + c.untyped[typePair{tA, tB}] = fn + return nil +} + +// Merge returns a new ConversionFuncs that contains all conversions from +// both other and c, with other conversions taking precedence. +func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { + merged := NewConversionFuncs() + for k, v := range c.fns { + merged.fns[k] = v + } + for k, v := range other.fns { + merged.fns[k] = v + } + for k, v := range c.untyped { + merged.untyped[k] = v + } + for k, v := range other.untyped { + merged.untyped[k] = v + } + return merged +} + +// Meta is supplied by Scheme, when it calls Convert. +type Meta struct { + // KeyNameMapping is an optional function which may map the listed key (field name) + // into a source and destination value. + KeyNameMapping FieldMappingFunc + // Context is an optional field that callers may use to pass info to conversion functions. + Context interface{} +} + +// scope contains information about an ongoing conversion. +type scope struct { + converter *Converter + meta *Meta + flags FieldMatchingFlags + + // srcStack & destStack are separate because they may not have a 1:1 + // relationship. + srcStack scopeStack + destStack scopeStack +} + +type scopeStackElem struct { + tag reflect.StructTag + value reflect.Value + key string +} + +type scopeStack []scopeStackElem + +func (s *scopeStack) pop() { + n := len(*s) + *s = (*s)[:n-1] +} + +func (s *scopeStack) push(e scopeStackElem) { + *s = append(*s, e) +} + +func (s *scopeStack) top() *scopeStackElem { + return &(*s)[len(*s)-1] +} + +func (s scopeStack) describe() string { + desc := "" + if len(s) > 1 { + desc = "(" + s[1].value.Type().String() + ")" + } + for i, v := range s { + if i < 2 { + // First layer on stack is not real; second is handled specially above. + continue + } + if v.key == "" { + desc += fmt.Sprintf(".%v", v.value.Type()) + } else { + desc += fmt.Sprintf(".%v", v.key) + } + } + return desc +} + +// Formats src & dest as indices for printing. +func (s *scope) setIndices(src, dest int) { + s.srcStack.top().key = fmt.Sprintf("[%v]", src) + s.destStack.top().key = fmt.Sprintf("[%v]", dest) +} + +// Formats src & dest as map keys for printing. +func (s *scope) setKeys(src, dest interface{}) { + s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src) + s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest) +} + +// Convert continues a conversion. +func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { + return s.converter.Convert(src, dest, flags, s.meta) +} + +// DefaultConvert continues a conversion, performing a default conversion (no conversion func) +// for the current stack frame. 
+func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error { + return s.converter.DefaultConvert(src, dest, flags, s.meta) +} + +// SrcTag returns the tag of the struct containing the current source item, if any. +func (s *scope) SrcTag() reflect.StructTag { + return s.srcStack.top().tag +} + +// DestTag returns the tag of the struct containing the current dest item, if any. +func (s *scope) DestTag() reflect.StructTag { + return s.destStack.top().tag +} + +// Flags returns the flags with which the current conversion was started. +func (s *scope) Flags() FieldMatchingFlags { + return s.flags +} + +// Meta returns the meta object that was originally passed to Convert. +func (s *scope) Meta() *Meta { + return s.meta +} + +// describe prints the path to get to the current (source, dest) values. +func (s *scope) describe() (src, dest string) { + return s.srcStack.describe(), s.destStack.describe() +} + +// error makes an error that includes information about where we were in the objects +// we were asked to convert. +func (s *scope) errorf(message string, args ...interface{}) error { + srcPath, destPath := s.describe() + where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath) + return fmt.Errorf(where+message, args...) +} + +// Verifies whether a conversion function has a correct signature. +func verifyConversionFunctionSignature(ft reflect.Type) error { + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 3 { + return fmt.Errorf("expected three 'in' params, got: %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got: %v", ft) + } + if ft.In(0).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) + } + if ft.In(1).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) + } + scopeType := Scope(nil) + if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a { + return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft) + } + var forErrorType error + // This convolution is necessary, otherwise TypeOf picks up on the fact + // that forErrorType is nil. + errorType := reflect.TypeOf(&forErrorType).Elem() + if ft.Out(0) != errorType { + return fmt.Errorf("expected error return, got: %v", ft) + } + return nil +} + +// RegisterConversionFunc registers a conversion func with the +// Converter. conversionFunc must take three parameters: a pointer to the input +// type, a pointer to the output type, and a conversion.Scope (which should be +// used if recursive conversion calls are desired). It must return an error. +// +// Example: +// c.RegisterConversionFunc( +// func(in *Pod, out *v1.Pod, s Scope) error { +// // conversion logic... +// return nil +// }) +// DEPRECATED: Will be removed in favor of RegisterUntypedConversionFunc +func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error { + return c.conversionFuncs.Add(conversionFunc) +} + +// Similar to RegisterConversionFunc, but registers conversion function that were +// automatically generated. +// DEPRECATED: Will be removed in favor of RegisterGeneratedUntypedConversionFunc +func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error { + return c.generatedConversionFuncs.Add(conversionFunc) +} + +// RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. 
The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (c *Converter) RegisterUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error { + return c.conversionFuncs.AddUntyped(a, b, fn) +} + +// RegisterGeneratedUntypedConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error { + return c.generatedConversionFuncs.AddUntyped(a, b, fn) +} + +// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested +// conversion between from and to is ignored. +func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { + typeFrom := reflect.TypeOf(from) + typeTo := reflect.TypeOf(to) + if reflect.TypeOf(from).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom) + } + if typeTo.Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) + } + c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} + return nil +} + +// RegisterInputDefaults registers a field name mapping function, used when converting +// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping +// applied automatically if the input matches in. A set of default flags for the input conversion +// may also be provided, which will be used when no explicit flags are requested. +func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error { + fv := reflect.ValueOf(in) + ft := fv.Type() + if ft.Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer 'in' argument, got: %v", ft) + } + c.inputFieldMappingFuncs[ft] = fn + c.inputDefaultFlags[ft] = defaultFlags + return nil +} + +// FieldMatchingFlags contains a list of ways in which struct fields could be +// copied. These constants may be | combined. +type FieldMatchingFlags int + +const ( + // Loop through destination fields, search for matching source + // field to copy it from. Source fields with no corresponding + // destination field will be ignored. If SourceToDest is + // specified, this flag is ignored. If neither is specified, + // or no flags are passed, this flag is the default. + DestFromSource FieldMatchingFlags = 0 + // Loop through source fields, search for matching dest field + // to copy it into. Destination fields with no corresponding + // source field will be ignored. + SourceToDest FieldMatchingFlags = 1 << iota + // Don't treat it as an error if the corresponding source or + // dest field can't be found. + IgnoreMissingFields + // Don't require type names to match. + AllowDifferentFieldTypeNames +) + +// IsSet returns true if the given flag or combination of flags is set. +func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { + if flag == DestFromSource { + // The bit logic doesn't work on the default value. + return f&SourceToDest != SourceToDest + } + return f&flag == flag +} + +// Convert will translate src to dest if it knows how. Both must be pointers. +// If no conversion func is registered and the default copying mechanism +// doesn't work on this type pair, an error will be returned. 
+// Read the comments on the various FieldMatchingFlags constants to understand +// what the 'flags' parameter does. +// 'meta' is given to allow you to pass information to conversion functions, +// it is not used by Convert() other than storing it in the scope. +// Not safe for objects with cyclic references! +func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + return c.doConversion(src, dest, flags, meta, c.convert) +} + +// DefaultConvert will translate src to dest if it knows how. Both must be pointers. +// No conversion func is used. If the default copying mechanism +// doesn't work on this type pair, an error will be returned. +// Read the comments on the various FieldMatchingFlags constants to understand +// what the 'flags' parameter does. +// 'meta' is given to allow you to pass information to conversion functions, +// it is not used by DefaultConvert() other than storing it in the scope. +// Not safe for objects with cyclic references! +func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + return c.doConversion(src, dest, flags, meta, c.defaultConvert) +} + +type conversionFunc func(sv, dv reflect.Value, scope *scope) error + +func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { + pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)} + scope := &scope{ + converter: c, + flags: flags, + meta: meta, + } + if fn, ok := c.conversionFuncs.untyped[pair]; ok { + return fn(src, dest, scope) + } + if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok { + return fn(src, dest, scope) + } + // TODO: consider everything past this point deprecated - we want to support only point to point top level + // conversions + + dv, err := EnforcePtr(dest) + if err != nil { + return err + } + if !dv.CanAddr() && !dv.CanSet() { + return fmt.Errorf("can't write to dest") + } + sv, err := EnforcePtr(src) + if err != nil { + return err + } + // Leave something on the stack, so that calls to struct tag getters never fail. + scope.srcStack.push(scopeStackElem{}) + scope.destStack.push(scopeStackElem{}) + return f(sv, dv, scope) +} + +// callCustom calls 'custom' with sv & dv. custom must be a conversion function. +func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error { + if !sv.CanAddr() { + sv2 := reflect.New(sv.Type()) + sv2.Elem().Set(sv) + sv = sv2 + } else { + sv = sv.Addr() + } + if !dv.CanAddr() { + if !dv.CanSet() { + return scope.errorf("can't addr or set dest.") + } + dvOrig := dv + dv := reflect.New(dvOrig.Type()) + defer func() { dvOrig.Set(dv) }() + } else { + dv = dv.Addr() + } + args := []reflect.Value{sv, dv, reflect.ValueOf(scope)} + ret := custom.Call(args)[0].Interface() + // This convolution is necessary because nil interfaces won't convert + // to errors. + if ret == nil { + return nil + } + return ret.(error) +} + +// convert recursively copies sv into dv, calling an appropriate conversion function if +// one is registered. +func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { + dt, st := dv.Type(), sv.Type() + pair := typePair{st, dt} + + // ignore conversions of this type + if _, ok := c.ignoredConversions[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt) + } + return nil + } + + // Convert sv to dv. 
+ if fv, ok := c.conversionFuncs.fns[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt) + } + return c.callCustom(sv, dv, fv, scope) + } + if fv, ok := c.generatedConversionFuncs.fns[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt) + } + return c.callCustom(sv, dv, fv, scope) + } + + return c.defaultConvert(sv, dv, scope) +} + +// defaultConvert recursively copies sv into dv. no conversion function is called +// for the current stack frame (but conversion functions may be called for nested objects) +func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error { + dt, st := dv.Type(), sv.Type() + + if !dv.CanSet() { + return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)") + } + + if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) { + return scope.errorf( + "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.", + c.nameFunc(st), c.nameFunc(dt), st, dt) + } + + switch st.Kind() { + case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: + // Don't copy these via assignment/conversion! + default: + // This should handle all simple types. + if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + if st.ConvertibleTo(dt) { + dv.Set(sv.Convert(dt)) + return nil + } + } + + if c.Debug != nil { + c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt) + } + + scope.srcStack.push(scopeStackElem{value: sv}) + scope.destStack.push(scopeStackElem{value: dv}) + defer scope.srcStack.pop() + defer scope.destStack.pop() + + switch dv.Kind() { + case reflect.Struct: + return c.convertKV(toKVValue(sv), toKVValue(dv), scope) + case reflect.Slice: + if sv.IsNil() { + // Don't make a zero-length slice. + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + scope.setIndices(i, i) + if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil { + return err + } + } + case reflect.Ptr: + if sv.IsNil() { + // Don't copy a nil ptr! + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return c.convert(sv.Elem(), dv.Elem(), scope) + default: + return c.convert(sv, dv.Elem(), scope) + } + case reflect.Map: + if sv.IsNil() { + // Don't copy a nil ptr! + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, sk := range sv.MapKeys() { + dk := reflect.New(dt.Key()).Elem() + if err := c.convert(sk, dk, scope); err != nil { + return err + } + dkv := reflect.New(dt.Elem()).Elem() + scope.setKeys(sk.Interface(), dk.Interface()) + // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false, + // because a map[string]struct{} does not allow a pointer reference. + // Calling a custom conversion function defined for the map value + // will panic. Example is PodInfo map[string]ContainerStatus. + if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil { + return err + } + dv.SetMapIndex(dk, dkv) + } + case reflect.Interface: + if sv.IsNil() { + // Don't copy a nil interface! 
+ dv.Set(reflect.Zero(dt)) + return nil + } + tmpdv := reflect.New(sv.Elem().Type()).Elem() + if err := c.convert(sv.Elem(), tmpdv, scope); err != nil { + return err + } + dv.Set(reflect.ValueOf(tmpdv.Interface())) + return nil + default: + return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt) + } + return nil +} + +var stringType = reflect.TypeOf("") + +func toKVValue(v reflect.Value) kvValue { + switch v.Kind() { + case reflect.Struct: + return structAdaptor(v) + case reflect.Map: + if v.Type().Key().AssignableTo(stringType) { + return stringMapAdaptor(v) + } + } + + return nil +} + +// kvValue lets us write the same conversion logic to work with both maps +// and structs. Only maps with string keys make sense for this. +type kvValue interface { + // returns all keys, as a []string. + keys() []string + // Will just return "" for maps. + tagOf(key string) reflect.StructTag + // Will return the zero Value if the key doesn't exist. + value(key string) reflect.Value + // Maps require explicit setting-- will do nothing for structs. + // Returns false on failure. + confirmSet(key string, v reflect.Value) bool +} + +type stringMapAdaptor reflect.Value + +func (a stringMapAdaptor) len() int { + return reflect.Value(a).Len() +} + +func (a stringMapAdaptor) keys() []string { + v := reflect.Value(a) + keys := make([]string, v.Len()) + for i, v := range v.MapKeys() { + if v.IsNil() { + continue + } + switch t := v.Interface().(type) { + case string: + keys[i] = t + } + } + return keys +} + +func (a stringMapAdaptor) tagOf(key string) reflect.StructTag { + return "" +} + +func (a stringMapAdaptor) value(key string) reflect.Value { + return reflect.Value(a).MapIndex(reflect.ValueOf(key)) +} + +func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool { + return true +} + +type structAdaptor reflect.Value + +func (a structAdaptor) len() int { + v := reflect.Value(a) + return v.Type().NumField() +} + +func (a structAdaptor) keys() []string { + v := reflect.Value(a) + t := v.Type() + keys := make([]string, t.NumField()) + for i := range keys { + keys[i] = t.Field(i).Name + } + return keys +} + +func (a structAdaptor) tagOf(key string) reflect.StructTag { + v := reflect.Value(a) + field, ok := v.Type().FieldByName(key) + if ok { + return field.Tag + } + return "" +} + +func (a structAdaptor) value(key string) reflect.Value { + v := reflect.Value(a) + return v.FieldByName(key) +} + +func (a structAdaptor) confirmSet(key string, v reflect.Value) bool { + return true +} + +// convertKV can convert things that consist of key/value pairs, like structs +// and some maps. +func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error { + if skv == nil || dkv == nil { + // TODO: add keys to stack to support really understandable error messages. 
+ return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv) + } + + lister := dkv + if scope.flags.IsSet(SourceToDest) { + lister = skv + } + + var mapping FieldMappingFunc + if scope.meta != nil && scope.meta.KeyNameMapping != nil { + mapping = scope.meta.KeyNameMapping + } + + for _, key := range lister.keys() { + if found, err := c.checkField(key, skv, dkv, scope); found { + if err != nil { + return err + } + continue + } + stag := skv.tagOf(key) + dtag := dkv.tagOf(key) + skey := key + dkey := key + if mapping != nil { + skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag) + } + + df := dkv.value(dkey) + sf := skv.value(skey) + if !df.IsValid() || !sf.IsValid() { + switch { + case scope.flags.IsSet(IgnoreMissingFields): + // No error. + case scope.flags.IsSet(SourceToDest): + return scope.errorf("%v not present in dest", dkey) + default: + return scope.errorf("%v not present in src", skey) + } + continue + } + scope.srcStack.top().key = skey + scope.srcStack.top().tag = stag + scope.destStack.top().key = dkey + scope.destStack.top().tag = dtag + if err := c.convert(sf, df, scope); err != nil { + return err + } + } + return nil +} + +// checkField returns true if the field name matches any of the struct +// field copying rules. The error should be ignored if it returns false. +func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) { + replacementMade := false + if scope.flags.IsSet(DestFromSource) { + df := dkv.value(fieldName) + if !df.IsValid() { + return false, nil + } + destKey := typeNamePair{df.Type(), fieldName} + // Check each of the potential source (type, name) pairs to see if they're + // present in sv. + for _, potentialSourceKey := range c.structFieldSources[destKey] { + sf := skv.value(potentialSourceKey.fieldName) + if !sf.IsValid() { + continue + } + if sf.Type() == potentialSourceKey.fieldType { + // Both the source's name and type matched, so copy. + scope.srcStack.top().key = potentialSourceKey.fieldName + scope.destStack.top().key = fieldName + if err := c.convert(sf, df, scope); err != nil { + return true, err + } + dkv.confirmSet(fieldName, df) + replacementMade = true + } + } + return replacementMade, nil + } + + sf := skv.value(fieldName) + if !sf.IsValid() { + return false, nil + } + srcKey := typeNamePair{sf.Type(), fieldName} + // Check each of the potential dest (type, name) pairs to see if they're + // present in dv. + for _, potentialDestKey := range c.structFieldDests[srcKey] { + df := dkv.value(potentialDestKey.fieldName) + if !df.IsValid() { + continue + } + if df.Type() == potentialDestKey.fieldType { + // Both the dest's name and type matched, so copy. + scope.srcStack.top().key = fieldName + scope.destStack.top().key = potentialDestKey.fieldName + if err := c.convert(sf, df, scope); err != nil { + return true, err + } + dkv.confirmSet(potentialDestKey.fieldName, df) + replacementMade = true + } + } + return replacementMade, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go new file mode 100644 index 000000000000..f21abe1e53a0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -0,0 +1,36 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/third_party/forked/golang/reflect" +) + +// The code for this type must be located in third_party, since it forks from +// go std lib. But for convenience, we expose the type here, too. +type Equalities struct { + reflect.Equalities +} + +// For convenience, panics on errors +func EqualitiesOrDie(funcs ...interface{}) Equalities { + e := Equalities{reflect.Equalities{}} + if err := e.AddFuncs(funcs...); err != nil { + panic(err) + } + return e +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go new file mode 100644 index 000000000000..7415d81646bc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package conversion provides go object versioning. +// +// Specifically, conversion provides a way for you to define multiple versions +// of the same object. You may write functions which implement conversion logic, +// but for the fields which did not change, copying is automated. This makes it +// easy to modify the structures you use in memory without affecting the format +// you store on disk or respond to in your external API calls. +package conversion // import "k8s.io/apimachinery/pkg/conversion" diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go new file mode 100644 index 000000000000..4ebc1ebc511f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value +// of the dereferenced pointer, ensuring that it is settable/addressable. +// Returns an error if this is not possible. 
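As a usage sketch (the Pod type and values below are illustrative, not taken from the vendored sources), EnforcePtr hands back the addressable value behind a pointer and rejects anything else:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
)

type Pod struct{ Name string }

func main() {
	p := &Pod{Name: "web-0"}
	v, err := conversion.EnforcePtr(p) // dereferenced, settable struct value
	if err != nil {
		panic(err)
	}
	fmt.Println(v.FieldByName("Name").String()) // web-0

	// Non-pointers (and nil pointers) produce an error instead of a value.
	if _, err := conversion.EnforcePtr(Pod{}); err != nil {
		fmt.Println(err) // expected pointer, but got main.Pod type
	}
}
```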
+func EnforcePtr(obj interface{}) (reflect.Value, error) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + if v.Kind() == reflect.Invalid { + return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") + } + return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type()) + } + if v.IsNil() { + return reflect.Value{}, fmt.Errorf("expected pointer, but got nil") + } + return v.Elem(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go new file mode 100644 index 000000000000..b3804aa42b26 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queryparams + +import ( + "fmt" + "net/url" + "reflect" + "strings" +) + +// Marshaler converts an object to a query parameter string representation +type Marshaler interface { + MarshalQueryParameter() (string, error) +} + +// Unmarshaler converts a string representation to an object +type Unmarshaler interface { + UnmarshalQueryParameter(string) error +} + +func jsonTag(field reflect.StructField) (string, bool) { + structTag := field.Tag.Get("json") + if len(structTag) == 0 { + return "", false + } + parts := strings.Split(structTag, ",") + tag := parts[0] + if tag == "-" { + tag = "" + } + omitempty := false + parts = parts[1:] + for _, part := range parts { + if part == "omitempty" { + omitempty = true + break + } + } + return tag, omitempty +} + +func formatValue(value interface{}) string { + return fmt.Sprintf("%v", value) +} + +func isPointerKind(kind reflect.Kind) bool { + return kind == reflect.Ptr +} + +func isStructKind(kind reflect.Kind) bool { + return kind == reflect.Struct +} + +func isValueKind(kind reflect.Kind) bool { + switch kind { + case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, + reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, + reflect.Float64, reflect.Complex64, reflect.Complex128: + return true + default: + return false + } +} + +func zeroValue(value reflect.Value) bool { + return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface()) +} + +func customMarshalValue(value reflect.Value) (reflect.Value, bool) { + // Return unless we implement a custom query marshaler + if !value.CanInterface() { + return reflect.Value{}, false + } + + marshaler, ok := value.Interface().(Marshaler) + if !ok { + if !isPointerKind(value.Kind()) && value.CanAddr() { + marshaler, ok = value.Addr().Interface().(Marshaler) + if !ok { + return reflect.Value{}, false + } + } else { + return reflect.Value{}, false + } + } + + // Don't invoke functions on nil pointers + // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response + if isPointerKind(value.Kind()) && 
zeroValue(value) { + return reflect.ValueOf(""), true + } + + // Get the custom marshalled value + v, err := marshaler.MarshalQueryParameter() + if err != nil { + return reflect.Value{}, false + } + return reflect.ValueOf(v), true +} + +func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) { + if omitempty && zeroValue(value) { + return + } + val := "" + iValue := fmt.Sprintf("%v", value.Interface()) + + if iValue != "" { + val = iValue + } + values.Add(tag, val) +} + +func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) { + for i := 0; i < list.Len(); i++ { + addParam(values, tag, omitempty, list.Index(i)) + } +} + +// Convert takes an object and converts it to a url.Values object using JSON tags as +// parameter names. Only top-level simple values, arrays, and slices are serialized. +// Embedded structs, maps, etc. will not be serialized. +func Convert(obj interface{}) (url.Values, error) { + result := url.Values{} + if obj == nil { + return result, nil + } + var sv reflect.Value + switch reflect.TypeOf(obj).Kind() { + case reflect.Ptr, reflect.Interface: + sv = reflect.ValueOf(obj).Elem() + default: + return nil, fmt.Errorf("expecting a pointer or interface") + } + st := sv.Type() + if !isStructKind(st.Kind()) { + return nil, fmt.Errorf("expecting a pointer to a struct") + } + + // Check all object fields + convertStruct(result, st, sv) + + return result, nil +} + +func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { + for i := 0; i < st.NumField(); i++ { + field := sv.Field(i) + tag, omitempty := jsonTag(st.Field(i)) + if len(tag) == 0 { + continue + } + ft := field.Type() + + kind := ft.Kind() + if isPointerKind(kind) { + ft = ft.Elem() + kind = ft.Kind() + if !field.IsNil() { + field = reflect.Indirect(field) + // If the field is non-nil, it should be added to params + // and the omitempty should be overwite to false + omitempty = false + } + } + + switch { + case isValueKind(kind): + addParam(result, tag, omitempty, field) + case kind == reflect.Array || kind == reflect.Slice: + if isValueKind(ft.Elem().Kind()) { + addListOfParams(result, tag, omitempty, field) + } + case isStructKind(kind) && !(zeroValue(field) && omitempty): + if marshalValue, ok := customMarshalValue(field); ok { + addParam(result, tag, omitempty, marshalValue) + } else { + convertStruct(result, ft, field) + } + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go new file mode 100644 index 000000000000..7b763de6f013 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package queryparams provides conversion from versioned +// runtime objects to URL query values +package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams" diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go new file mode 100644 index 000000000000..c39b8039ae7b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fields implements a simple field system, parsing and matching +// selectors with sets of fields. +package fields // import "k8s.io/apimachinery/pkg/fields" diff --git a/vendor/k8s.io/apimachinery/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go new file mode 100644 index 000000000000..623b27e95715 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/fields.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "sort" + "strings" +) + +// Fields allows you to present fields independently from their storage. +type Fields interface { + // Has returns whether the provided field exists. + Has(field string) (exists bool) + + // Get returns the value for the provided field. + Get(field string) (value string) +} + +// Set is a map of field:value. It implements Fields. +type Set map[string]string + +// String returns all fields listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided field exists in the map. +func (ls Set) Has(field string) bool { + _, exists := ls[field] + return exists +} + +// Get returns the value in the map for the provided field. +func (ls Set) Get(field string) string { + return ls[field] +} + +// AsSelector converts fields into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go new file mode 100644 index 000000000000..70d94ded885d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import "k8s.io/apimachinery/pkg/selection" + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Requirement contains a field, a value, and an operator that relates the field and value. +// This is currently for reading internal selection information of field selector. +type Requirement struct { + Operator selection.Operator + Field string + Value string +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go new file mode 100644 index 000000000000..e3e4453b64f1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -0,0 +1,476 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/selection" +) + +// Selector represents a field selector. +type Selector interface { + // Matches returns true if this selector matches the given set of fields. + Matches(Fields) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific field to be set, and if so returns the value it + // requires. + RequiresExactMatch(field string) (value string, found bool) + + // Transform returns a new copy of the selector after TransformFunc has been + // applied to the entire selector, or an error if fn returns an error. + // If for a given requirement both field and value are transformed to empty + // string, the requirement is skipped. + Transform(fn TransformFunc) (Selector, error) + + // Requirements converts this interface to Requirements to expose + // more detailed selection information. + Requirements() Requirements + + // String returns a human readable string that represents this selector. + String() string + + // Make a deep copy of the selector. 
+	DeepCopySelector() Selector
+}
+
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Fields) bool { return false }
+func (n nothingSelector) Empty() bool { return false }
+func (n nothingSelector) String() string { return "" }
+func (n nothingSelector) Requirements() Requirements { return nil }
+func (n nothingSelector) DeepCopySelector() Selector { return n }
+func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false }
+func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil }
+
+// Nothing returns a selector that matches no fields
+func Nothing() Selector {
+ return nothingSelector{}
+}
+
+// Everything returns a selector that matches all fields.
+func Everything() Selector {
+ return andTerm{}
+}
+
+type hasTerm struct {
+ field, value string
+}
+
+func (t *hasTerm) Matches(ls Fields) bool {
+ return ls.Get(t.field) == t.value
+}
+
+func (t *hasTerm) Empty() bool {
+ return false
+}
+
+func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) {
+ if t.field == field {
+ return t.value, true
+ }
+ return "", false
+}
+
+func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) {
+ field, value, err := fn(t.field, t.value)
+ if err != nil {
+ return nil, err
+ }
+ if len(field) == 0 && len(value) == 0 {
+ return Everything(), nil
+ }
+ return &hasTerm{field, value}, nil
+}
+
+func (t *hasTerm) Requirements() Requirements {
+ return []Requirement{{
+ Field: t.field,
+ Operator: selection.Equals,
+ Value: t.value,
+ }}
+}
+
+func (t *hasTerm) String() string {
+ return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *hasTerm) DeepCopySelector() Selector {
+ if t == nil {
+ return nil
+ }
+ out := new(hasTerm)
+ *out = *t
+ return out
+}
+
+type notHasTerm struct {
+ field, value string
+}
+
+func (t *notHasTerm) Matches(ls Fields) bool {
+ return ls.Get(t.field) != t.value
+}
+
+func (t *notHasTerm) Empty() bool {
+ return false
+}
+
+func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) {
+ return "", false
+}
+
+func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) {
+ field, value, err := fn(t.field, t.value)
+ if err != nil {
+ return nil, err
+ }
+ if len(field) == 0 && len(value) == 0 {
+ return Everything(), nil
+ }
+ return &notHasTerm{field, value}, nil
+}
+
+func (t *notHasTerm) Requirements() Requirements {
+ return []Requirement{{
+ Field: t.field,
+ Operator: selection.NotEquals,
+ Value: t.value,
+ }}
+}
+
+func (t *notHasTerm) String() string {
+ return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *notHasTerm) DeepCopySelector() Selector {
+ if t == nil {
+ return nil
+ }
+ out := new(notHasTerm)
+ *out = *t
+ return out
+}
+
+type andTerm []Selector
+
+func (t andTerm) Matches(ls Fields) bool {
+ for _, q := range t {
+ if !q.Matches(ls) {
+ return false
+ }
+ }
+ return true
+}
+
+func (t andTerm) Empty() bool {
+ if t == nil {
+ return true
+ }
+ if len([]Selector(t)) == 0 {
+ return true
+ }
+ for i := range t {
+ if !t[i].Empty() {
+ return false
+ }
+ }
+ return true
+}
+
+func (t andTerm) RequiresExactMatch(field string) (string, bool) {
+ if t == nil || len([]Selector(t)) == 0 {
+ return "", false
+ }
+ for i := range t {
+ if value, found := t[i].RequiresExactMatch(field); found {
+ return value, found
+ }
+ }
+ return "", false
+}
+
+func (t andTerm) Transform(fn TransformFunc) (Selector, error) {
+ next := make([]Selector, 0, len([]Selector(t)))
+ for _, s := range 
[]Selector(t) { + n, err := s.Transform(fn) + if err != nil { + return nil, err + } + if !n.Empty() { + next = append(next, n) + } + } + return andTerm(next), nil +} + +func (t andTerm) Requirements() Requirements { + reqs := make([]Requirement, 0, len(t)) + for _, s := range []Selector(t) { + rs := s.Requirements() + reqs = append(reqs, rs...) + } + return reqs +} + +func (t andTerm) String() string { + var terms []string + for _, q := range t { + terms = append(terms, q.String()) + } + return strings.Join(terms, ",") +} + +func (t andTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := make([]Selector, len(t)) + for i := range t { + out[i] = t[i].DeepCopySelector() + } + return andTerm(out) +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil Set is considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil { + return Everything() + } + items := make([]Selector, 0, len(ls)) + for field, value := range ls { + items = append(items, &hasTerm{field: field, value: value}) + } + if len(items) == 1 { + return items[0] + } + return andTerm(items) +} + +// valueEscaper prefixes \,= characters with a backslash +var valueEscaper = strings.NewReplacer( + // escape \ characters + `\`, `\\`, + // then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector + `,`, `\,`, + `=`, `\=`, +) + +// EscapeValue escapes an arbitrary literal string for use as a fieldSelector value +func EscapeValue(s string) string { + return valueEscaper.Replace(s) +} + +// InvalidEscapeSequence indicates an error occurred unescaping a field selector +type InvalidEscapeSequence struct { + sequence string +} + +func (i InvalidEscapeSequence) Error() string { + return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence) +} + +// UnescapedRune indicates an error occurred unescaping a field selector +type UnescapedRune struct { + r rune +} + +func (i UnescapedRune) Error() string { + return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r) +} + +// UnescapeValue unescapes a fieldSelector value and returns the original literal value. +// May return the original string if it contains no escaped or special characters. +func UnescapeValue(s string) (string, error) { + // if there's no escaping or special characters, just return to avoid allocation + if !strings.ContainsAny(s, `\,=`) { + return s, nil + } + + v := bytes.NewBuffer(make([]byte, 0, len(s))) + inSlash := false + for _, c := range s { + if inSlash { + switch c { + case '\\', ',', '=': + // omit the \ for recognized escape sequences + v.WriteRune(c) + default: + // error on unrecognized escape sequences + return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})} + } + inSlash = false + continue + } + + switch c { + case '\\': + inSlash = true + case ',', '=': + // unescaped , and = characters are not allowed in field selector values + return "", UnescapedRune{r: c} + default: + v.WriteRune(c) + } + } + + // Ending with a single backslash is an invalid sequence + if inSlash { + return "", InvalidEscapeSequence{sequence: "\\"} + } + + return v.String(), nil +} + +// ParseSelectorOrDie takes a string representing a selector and returns an +// object suitable for matching, or panic when an error occur. 
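A small round-trip sketch for the escaping helpers above (the input string is arbitrary and only for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	raw := `a=b,c\d` // contains all three characters that need escaping
	escaped := fields.EscapeValue(raw)
	fmt.Println(escaped) // a\=b\,c\\d

	back, err := fields.UnescapeValue(escaped)
	fmt.Println(back == raw, err) // true <nil>

	// An unescaped ',' or '=' inside a value is rejected when unescaping.
	if _, err := fields.UnescapeValue("x=y"); err != nil {
		fmt.Println("unescaped '=' rejected:", err)
	}
}
```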
+func ParseSelectorOrDie(s string) Selector { + selector, err := ParseSelector(s) + if err != nil { + panic(err) + } + return selector +} + +// ParseSelector takes a string representing a selector and returns an +// object suitable for matching, or an error. +func ParseSelector(selector string) (Selector, error) { + return parseSelector(selector, + func(lhs, rhs string) (newLhs, newRhs string, err error) { + return lhs, rhs, nil + }) +} + +// ParseAndTransformSelector parses the selector and runs them through the given TransformFunc. +func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { + return parseSelector(selector, fn) +} + +// TransformFunc transforms selectors. +type TransformFunc func(field, value string) (newField, newValue string, err error) + +// splitTerms returns the comma-separated terms contained in the given fieldSelector. +// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved. +func splitTerms(fieldSelector string) []string { + if len(fieldSelector) == 0 { + return nil + } + + terms := make([]string, 0, 1) + startIndex := 0 + inSlash := false + for i, c := range fieldSelector { + switch { + case inSlash: + inSlash = false + case c == '\\': + inSlash = true + case c == ',': + terms = append(terms, fieldSelector[startIndex:i]) + startIndex = i + 1 + } + } + + terms = append(terms, fieldSelector[startIndex:]) + + return terms +} + +const ( + notEqualOperator = "!=" + doubleEqualOperator = "==" + equalOperator = "=" +) + +// termOperators holds the recognized operators supported in fieldSelectors. +// doubleEqualOperator and equal are equivalent, but doubleEqualOperator is checked first +// to avoid leaving a leading = character on the rhs value. +var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator} + +// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful. +// no escaping of special characters is supported in the lhs value, so the first occurrence of a recognized operator is used as the split point. +// the literal rhs is returned, and the caller is responsible for applying any desired unescaping. 
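A usage sketch for the field selector parser (the field names and values are illustrative): EscapeValue lets a literal value containing ',' or '=' survive parsing, and the resulting Selector can then be matched against a fields.Set.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Embed a literal value containing ',' and '=' by escaping it first.
	raw := "metadata.name=web-0,status.reason!=" + fields.EscapeValue("a,b=c")

	sel, err := fields.ParseSelector(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.String()) // metadata.name=web-0,status.reason!=a\,b\=c

	matched := sel.Matches(fields.Set{
		"metadata.name": "web-0",
		"status.reason": "Evicted",
	})
	fmt.Println(matched) // true: name matches and reason is not "a,b=c"
}
```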
+func splitTerm(term string) (lhs, op, rhs string, ok bool) {
+ for i := range term {
+ remaining := term[i:]
+ for _, op := range termOperators {
+ if strings.HasPrefix(remaining, op) {
+ return term[0:i], op, term[i+len(op):], true
+ }
+ }
+ }
+ return "", "", "", false
+}
+
+func parseSelector(selector string, fn TransformFunc) (Selector, error) {
+ parts := splitTerms(selector)
+ sort.StringSlice(parts).Sort()
+ var items []Selector
+ for _, part := range parts {
+ if part == "" {
+ continue
+ }
+ lhs, op, rhs, ok := splitTerm(part)
+ if !ok {
+ return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+ }
+ unescapedRHS, err := UnescapeValue(rhs)
+ if err != nil {
+ return nil, err
+ }
+ switch op {
+ case notEqualOperator:
+ items = append(items, &notHasTerm{field: lhs, value: unescapedRHS})
+ case doubleEqualOperator:
+ items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+ case equalOperator:
+ items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+ default:
+ return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+ }
+ }
+ if len(items) == 1 {
+ return items[0].Transform(fn)
+ }
+ return andTerm(items).Transform(fn)
+}
+
+// OneTermEqualSelector returns an object that matches objects where one field/field equals one value.
+// Cannot return an error.
+func OneTermEqualSelector(k, v string) Selector {
+ return &hasTerm{field: k, value: v}
+}
+
+// OneTermNotEqualSelector returns an object that matches objects where one field/field does not equal one value.
+// Cannot return an error.
+func OneTermNotEqualSelector(k, v string) Selector {
+ return &notHasTerm{field: k, value: v}
+}
+
+// AndSelectors creates a selector that is the logical AND of all the given selectors
+func AndSelectors(selectors ...Selector) Selector {
+ return andTerm(selectors)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
new file mode 100644
index 000000000000..82de0051bd63
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package labels implements a simple label system, parsing and matching
+// selectors with sets of labels.
+package labels // import "k8s.io/apimachinery/pkg/labels"
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
new file mode 100644
index 000000000000..32db4d96f69f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -0,0 +1,181 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "fmt" + "sort" + "strings" +) + +// Labels allows you to present labels independently from their storage. +type Labels interface { + // Has returns whether the provided label exists. + Has(label string) (exists bool) + + // Get returns the value for the provided label. + Get(label string) (value string) +} + +// Set is a map of label:value. It implements Labels. +type Set map[string]string + +// String returns all labels listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided label exists in the map. +func (ls Set) Has(label string) bool { + _, exists := ls[label] + return exists +} + +// Get returns the value in the map for the provided label. +func (ls Set) Get(label string) string { + return ls[label] +} + +// AsSelector converts labels into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} + +// AsSelectorPreValidated converts labels into a selector, but +// assumes that labels are already validated and thus don't +// preform any validation. +// According to our measurements this is significantly faster +// in codepaths that matter at high scale. +func (ls Set) AsSelectorPreValidated() Selector { + return SelectorFromValidatedSet(ls) +} + +// FormatLabels convert label map into plain string +func FormatLabels(labelMap map[string]string) string { + l := Set(labelMap).String() + if l == "" { + l = "" + } + return l +} + +// Conflicts takes 2 maps and returns true if there a key match between +// the maps but the value doesn't match, and returns false in other cases +func Conflicts(labels1, labels2 Set) bool { + small := labels1 + big := labels2 + if len(labels2) < len(labels1) { + small = labels2 + big = labels1 + } + + for k, v := range small { + if val, match := big[k]; match { + if val != v { + return true + } + } + } + + return false +} + +// Merge combines given maps, and does not check for any conflicts +// between the maps. In case of conflicts, second map (labels2) wins +func Merge(labels1, labels2 Set) Set { + mergedMap := Set{} + + for k, v := range labels1 { + mergedMap[k] = v + } + for k, v := range labels2 { + mergedMap[k] = v + } + return mergedMap +} + +// Equals returns true if the given maps are equal +func Equals(labels1, labels2 Set) bool { + if len(labels1) != len(labels2) { + return false + } + + for k, v := range labels1 { + value, ok := labels2[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// AreLabelsInWhiteList verifies if the provided label list +// is in the provided whitelist and returns true, otherwise false. 
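As an illustration of the Set helpers above (label keys and values are made up for the example):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	base := labels.Set{"app": "web", "tier": "frontend"}
	patch := labels.Set{"tier": "backend", "env": "prod"}

	fmt.Println(labels.Conflicts(base, patch)) // true: both set "tier" with different values
	fmt.Println(labels.Equals(base, patch))    // false

	merged := labels.Merge(base, patch) // second map wins on conflicting keys
	fmt.Println(merged.String())        // app=web,env=prod,tier=backend
}
```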
+func AreLabelsInWhiteList(labels, whitelist Set) bool { + if len(whitelist) == 0 { + return true + } + + for k, v := range labels { + value, ok := whitelist[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// ConvertSelectorToLabelsMap converts selector string to labels map +// and validates keys and values +func ConvertSelectorToLabelsMap(selector string) (Set, error) { + labelsMap := Set{} + + if len(selector) == 0 { + return labelsMap, nil + } + + labels := strings.Split(selector, ",") + for _, label := range labels { + l := strings.Split(label, "=") + if len(l) != 2 { + return labelsMap, fmt.Errorf("invalid selector: %s", l) + } + key := strings.TrimSpace(l[0]) + if err := validateLabelKey(key); err != nil { + return labelsMap, err + } + value := strings.TrimSpace(l[1]) + if err := validateLabelValue(value); err != nil { + return labelsMap, err + } + labelsMap[key] = value + } + return labelsMap, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go new file mode 100644 index 000000000000..f5a0888932f2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -0,0 +1,891 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" +) + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. + // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector +} + +// Everything returns a selector that matches all labels. 
+func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +// NewSelector returns a nil selector +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + result := make([]Requirement, len(s)) + for i := range s { + s[i].DeepCopyInto(&result[i]) + } + return result +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +// +k8s:deepcopy-gen=true +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. 
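A sketch of building a selector programmatically from requirements (the keys and values are illustrative); Add sorts requirements by key, so the String() form is deterministic:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	inReq, err := labels.NewRequirement("tier", selection.In, []string{"frontend", "cache"})
	if err != nil {
		panic(err)
	}
	existsReq, err := labels.NewRequirement("app", selection.Exists, nil)
	if err != nil {
		panic(err)
	}

	sel := labels.NewSelector().Add(*inReq, *existsReq)
	fmt.Println(sel.String()) // app,tier in (cache,frontend)

	fmt.Println(sel.Matches(labels.Set{"app": "web", "tier": "cache"})) // true
	fmt.Println(sel.Matches(labels.Set{"tier": "frontend"}))            // false: "app" is missing
}
```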
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + if err := validateLabelKey(key); err != nil { + return nil, err + } + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + return nil, fmt.Errorf("exact-match compatibility requires one single value") + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + return nil, fmt.Errorf("values set must be empty for exists and does not exist") + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") + } + } + default: + return nil, fmt.Errorf("operator '%v' is not recognized", op) + } + + for i := range vals { + if err := validateLabelValue(vals[i]); err != nil { + return nil, err + } + } + return &Requirement{key: key, operator: op, strValues: vals}, nil +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. +func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to a integer. 
+ if len(r.strValues) != 1 { + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +// Key returns requirement key +func (r *Requirement) Key() string { + return r.key +} + +// Operator returns requirement operator +func (r *Requirement) Operator() selection.Operator { + return r.operator +} + +// Values returns requirement values +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Empty returns true if the internalSelector doesn't restrict selection space +func (lsel internalSelector) Empty() bool { + if lsel == nil { + return true + } + return len(lsel) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. +func (r *Requirement) String() string { + var buffer bytes.Buffer + if r.operator == selection.DoesNotExist { + buffer.WriteString("!") + } + buffer.WriteString(r.key) + + switch r.operator { + case selection.Equals: + buffer.WriteString("=") + case selection.DoubleEquals: + buffer.WriteString("==") + case selection.NotEquals: + buffer.WriteString("!=") + case selection.In: + buffer.WriteString(" in ") + case selection.NotIn: + buffer.WriteString(" notin ") + case selection.GreaterThan: + buffer.WriteString(">") + case selection.LessThan: + buffer.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return buffer.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString("(") + } + if len(r.strValues) == 1 { + buffer.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + // normalizes value order on output, without mutating the in-memory selector representation + // also avoids normalization when it is not required, and ensures we do not mutate shared data + buffer.WriteString(strings.Join(safeSort(r.strValues), ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString(")") + } + return buffer.String() +} + +// safeSort sort input strings without modification +func safeSort(in []string) []string { + if sort.StringsAreSorted(in) { + return in + } + out := make([]string, len(in)) + copy(out, in) + sort.Strings(out) + return out +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (lsel internalSelector) Add(reqs ...Requirement) Selector { + var sel internalSelector + for ix := range lsel { + sel = append(sel, lsel[ix]) + } + for _, r := range reqs { + sel = append(sel, r) + } + sort.Sort(ByKey(sel)) + return sel +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. 
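A sketch of the numeric operators handled above; Gt and Lt interpret the label value as an integer (the key and values are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	gt, err := labels.NewRequirement("replicas", selection.GreaterThan, []string{"3"})
	if err != nil {
		panic(err)
	}
	fmt.Println(gt.String())                             // replicas>3
	fmt.Println(gt.Matches(labels.Set{"replicas": "5"})) // true: 5 > 3
	fmt.Println(gt.Matches(labels.Set{"replicas": "2"})) // false
	fmt.Println(gt.Matches(labels.Set{"other": "x"}))    // false: key not present
}
```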
+func (lsel internalSelector) Matches(l Labels) bool { + for ix := range lsel { + if matches := lsel[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. +func (lsel internalSelector) String() string { + var reqs []string + for ix := range lsel { + reqs = append(reqs, lsel[ix].String()) + } + return strings.Join(reqs, ",") +} + +// Token represents constant definition for lexer token +type Token int + +const ( + // ErrorToken represents scan error + ErrorToken Token = iota + // EndOfStringToken represents end of string + EndOfStringToken + // ClosedParToken represents close parenthesis + ClosedParToken + // CommaToken represents the comma + CommaToken + // DoesNotExistToken represents logic not + DoesNotExistToken + // DoubleEqualsToken represents double equals + DoubleEqualsToken + // EqualsToken represents equal + EqualsToken + // GreaterThanToken represents greater than + GreaterThanToken + // IdentifierToken represents identifier, e.g. keys and values + IdentifierToken + // InToken represents in + InToken + // LessThanToken represents less than + LessThanToken + // NotEqualsToken represents not equal + NotEqualsToken + // NotInToken represents not in + NotInToken + // OpenParToken represents open parenthesis + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// ScannedItem contains the Token and the literal produced by the lexer. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detect if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. +// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read return the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? 
+ return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. "!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIDOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// ParserContext represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + // KeyAndOperator represents key and operator + KeyAndOperator ParserContext = iota + // Values represents values + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. Increments the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. 
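Putting the lexer and parser together, a set-based selector string round-trips like this (the expression and label values are only examples):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel, err := labels.Parse("environment in (production, qa), tier notin (frontend), partition, !legacy")
	if err != nil {
		panic(err)
	}

	// Requirements are sorted by key, so the string form is deterministic.
	fmt.Println(sel.String()) // environment in (production,qa),!legacy,partition,tier notin (frontend)

	pod := labels.Set{"environment": "production", "partition": "eu-west"}
	fmt.Println(sel.Matches(pod)) // true: tier and legacy are absent, which satisfies notin and !
}
```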
+func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parse literals. +// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator return operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case 
IdentifierToken, CommaToken:
+ s, err := p.parseIdentifiersList() // handles general cases
+ if err != nil {
+ return s, err
+ }
+ if tok, _ = p.consume(Values); tok != ClosedParToken {
+ return nil, fmt.Errorf("found '%s', expected: ')'", lit)
+ }
+ return s, nil
+ case ClosedParToken: // handles "()"
+ p.consume(Values)
+ return sets.NewString(""), nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
+ }
+}
+
+// parseIdentifiersList parses a (possibly empty) list of
+// of comma separated (possibly empty) identifiers
+func (p *Parser) parseIdentifiersList() (sets.String, error) {
+ s := sets.NewString()
+ for {
+ tok, lit := p.consume(Values)
+ switch tok {
+ case IdentifierToken:
+ s.Insert(lit)
+ tok2, lit2 := p.lookahead(Values)
+ switch tok2 {
+ case CommaToken:
+ continue
+ case ClosedParToken:
+ return s, nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
+ }
+ case CommaToken: // handled here since we can have "(,"
+ if s.Len() == 0 {
+ s.Insert("") // to handle (,
+ }
+ tok2, _ := p.lookahead(Values)
+ if tok2 == ClosedParToken {
+ s.Insert("") // to handle ,) Double "" removed by StringSet
+ return s, nil
+ }
+ if tok2 == CommaToken {
+ p.consume(Values)
+ s.Insert("") // to handle ,, Double "" removed by StringSet
+ }
+ default: // it can be operator
+ return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
+ }
+ }
+}
+
+// parseExactValue parses the only value for exact match style
+func (p *Parser) parseExactValue() (sets.String, error) {
+ s := sets.NewString()
+ tok, lit := p.lookahead(Values)
+ if tok == EndOfStringToken || tok == CommaToken {
+ s.Insert("")
+ return s, nil
+ }
+ tok, lit = p.consume(Values)
+ if tok == IdentifierToken {
+ s.Insert(lit)
+ return s, nil
+ }
+ return nil, fmt.Errorf("found '%s', expected: identifier", lit)
+}
+
+// Parse takes a string representing a selector and returns a selector
+// object, or an error. This parsing function differs from ParseSelector
+// as they parse different selectors with different syntaxes.
+// The input will cause an error if it does not follow this form:
+//
+// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax>
+// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
+// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set>
+// <inclusion-exclusion> ::= <inclusion> | <exclusion>
+// <exclusion> ::= "notin"
+// <inclusion> ::= "in"
+// <value-set> ::= "(" <values> ")"
+// <values> ::= VALUE | VALUE "," <values>
+// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
+//
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t')
+// Example of valid syntax:
+// "x in (foo,,baz),y,z notin ()"
+//
+// Note:
+// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
+// VALUEs in its requirement
+// (2) Exclusion - " notin " - denotes that the KEY is not equal to any
+// of the VALUEs in its requirement or does not exist
+// (3) The empty string is a valid VALUE
+// (4) A requirement with just a KEY - as in "y" above - denotes that
+// the KEY exists and can be any VALUE.
+// (5) A requirement with just !KEY requires that the KEY not exist.
+//
+func Parse(selector string) (Selector, error) {
+ parsedSelector, err := parse(selector)
+ if err == nil {
+ return parsedSelector, nil
+ }
+ return nil, err
+}
+
+// parse parses the string representation of the selector and returns the internalSelector struct.
+// The callers of this method can then decide how to return the internalSelector struct to their
+// callers. 
This function has two callers now, one returns a Selector interface and the other +// returns a list of requirements. +func parse(selector string) (internalSelector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) // sort to grant determistic parsing + return internalSelector(items), err +} + +func validateLabelKey(k string) error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) + } + return nil +} + +func validateLabelValue(v string) error { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + return nil +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + r, err := NewRequirement(label, selection.Equals, []string{value}) + if err == nil { + requirements = append(requirements, *r) + } else { + //TODO: double check errors when input comes from serialization? + return internalSelector{} + } + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// ParseToRequirements takes a string representing a selector and returns a list of +// requirements. This function is suitable for those callers that perform additional +// processing on selector requirements. +// See the documentation for Parse() function for more details. +// TODO: Consider exporting the internalSelector type instead. +func ParseToRequirements(selector string) ([]Requirement, error) { + return parse(selector) +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go new file mode 100644 index 000000000000..4d482947fcd1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -0,0 +1,42 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package labels + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Requirement) DeepCopyInto(out *Requirement) { + *out = *in + if in.strValues != nil { + in, out := &in.strValues, &out.strValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. +func (in *Requirement) DeepCopy() *Requirement { + if in == nil { + return nil + } + out := new(Requirement) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go new file mode 100644 index 000000000000..284e32bc3cb8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -0,0 +1,332 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "reflect" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// codec binds an encoder and decoder. +type codec struct { + Encoder + Decoder +} + +// NewCodec creates a Codec from an Encoder and Decoder. +func NewCodec(e Encoder, d Decoder) Codec { + return codec{e, d} +} + +// Encode is a convenience wrapper for encoding to a []byte from an Encoder +func Encode(e Encoder, obj Object) ([]byte, error) { + // TODO: reuse buffer + buf := &bytes.Buffer{} + if err := e.Encode(obj, buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Decode is a convenience wrapper for decoding data into an Object. +func Decode(d Decoder, data []byte) (Object, error) { + obj, _, err := d.Decode(data, nil, nil) + return obj, err +} + +// DecodeInto performs a Decode into the provided object. +func DecodeInto(d Decoder, data []byte, into Object) error { + out, gvk, err := d.Decode(data, nil, into) + if err != nil { + return err + } + if out != into { + return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) + } + return nil +} + +// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. +func EncodeOrDie(e Encoder, obj Object) string { + bytes, err := Encode(e, obj) + if err != nil { + panic(err) + } + return string(bytes) +} + +// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or +// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. +func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) { + if obj != nil { + kinds, _, err := t.ObjectKinds(obj) + if err != nil { + return nil, err + } + for _, kind := range kinds { + if gvk == kind { + return obj, nil + } + } + } + return c.New(gvk) +} + +// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding. 
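+// As an illustrative sketch (jsonDecoder stands in for any real Decoder), a decode-only
+// codec can be assembled as:
+//
+//	decodeOnly := NewCodec(NoopEncoder{Decoder: jsonDecoder}, jsonDecoder)
+//
+// Calling Encode on decodeOnly then returns the error below instead of writing output.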
+type NoopEncoder struct { + Decoder +} + +var _ Serializer = NoopEncoder{} + +func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) +} + +// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. +type NoopDecoder struct { + Encoder +} + +var _ Serializer = NoopDecoder{} + +func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder)) +} + +// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. +func NewParameterCodec(scheme *Scheme) ParameterCodec { + return ¶meterCodec{ + typer: scheme, + convertor: scheme, + creator: scheme, + defaulter: scheme, + } +} + +// parameterCodec implements conversion to and from query parameters and objects. +type parameterCodec struct { + typer ObjectTyper + convertor ObjectConvertor + creator ObjectCreater + defaulter ObjectDefaulter +} + +var _ ParameterCodec = ¶meterCodec{} + +// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then +// converts that object to into (if necessary). Returns an error if the operation cannot be completed. +func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error { + if len(parameters) == 0 { + return nil + } + targetGVKs, _, err := c.typer.ObjectKinds(into) + if err != nil { + return err + } + for i := range targetGVKs { + if targetGVKs[i].GroupVersion() == from { + if err := c.convertor.Convert(¶meters, into, nil); err != nil { + return err + } + // in the case where we going into the same object we're receiving, default on the outbound object + if c.defaulter != nil { + c.defaulter.Default(into) + } + return nil + } + } + + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) + if err != nil { + return err + } + if err := c.convertor.Convert(¶meters, input, nil); err != nil { + return err + } + // if we have defaulter, default the input before converting to output + if c.defaulter != nil { + c.defaulter.Default(input) + } + return c.convertor.Convert(input, into, nil) +} + +// EncodeParameters converts the provided object into the to version, then converts that object to url.Values. +// Returns an error if conversion is not possible. 
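+// A rough usage sketch (scheme, opts and groupVersion are placeholders for a real scheme,
+// an options object registered in it, and the wire version to encode to):
+//
+//	codec := NewParameterCodec(scheme)
+//	values, err := codec.EncodeParameters(&opts, groupVersion)
+//	// values is a url.Values ready to be appended to a request URL
+//
+// DecodeParameters above performs the inverse conversion from url.Values back into an object.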
+func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) { + gvks, _, err := c.typer.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + if to != gvk.GroupVersion() { + out, err := c.convertor.ConvertToVersion(obj, to) + if err != nil { + return nil, err + } + obj = out + } + return queryparams.Convert(obj) +} + +type base64Serializer struct { + Encoder + Decoder +} + +func NewBase64Serializer(e Encoder, d Decoder) Serializer { + return &base64Serializer{e, d} +} + +func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + e := base64.NewEncoder(base64.StdEncoding, stream) + err := s.Encoder.Encode(obj, e) + e.Close() + return err +} + +func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) + n, err := base64.StdEncoding.Decode(out, data) + if err != nil { + return nil, nil, err + } + return s.Decoder.Decode(out[:n], defaults, into) +} + +// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot +// include media-type parameters), or the first info with an empty media type, or false if no type matches. +func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) { + for _, info := range types { + if info.MediaType == mediaType { + return info, true + } + } + for _, info := range types { + if len(info.MediaType) == 0 { + return info, true + } + } + return SerializerInfo{}, false +} + +var ( + // InternalGroupVersioner will always prefer the internal version for a given group version kind. + InternalGroupVersioner GroupVersioner = internalGroupVersioner{} + // DisabledGroupVersioner will reject all kinds passed to it. + DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} +) + +type internalGroupVersioner struct{} + +// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. +func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, kind := range kinds { + if kind.Version == APIVersionInternal { + return kind, true + } + } + for _, kind := range kinds { + return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true + } + return schema.GroupVersionKind{}, false +} + +type disabledGroupVersioner struct{} + +// KindForGroupVersionKinds returns false for any input. +func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + return schema.GroupVersionKind{}, false +} + +// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. +type GroupVersioners []GroupVersioner + +// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred. 
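+// For example (a sketch; the concrete versioners and kinds are placeholders), a caller can
+// express an ordered preference of target versions:
+//
+//	gvs := GroupVersioners{schema.GroupVersion{Group: "apps", Version: "v1"}, InternalGroupVersioner}
+//	target, ok := gvs.KindForGroupVersionKinds(kinds)
+//
+// The apps/v1 entry is consulted first and InternalGroupVersioner only answers when the
+// first entry reports no match.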
+func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + return target, true + } + return schema.GroupVersionKind{}, false +} + +// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner +var _ GroupVersioner = schema.GroupVersion{} +var _ GroupVersioner = schema.GroupVersions{} +var _ GroupVersioner = multiGroupVersioner{} + +type multiGroupVersioner struct { + target schema.GroupVersion + acceptedGroupKinds []schema.GroupKind + coerce bool +} + +// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. +// Kind may be empty in the provided group kind, in which case any kind will match. +func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) { + return gv + } + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} +} + +// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind. +// Incoming kinds that match the provided groupKinds are preferred. +// Kind may be empty in the provided group kind, in which case any kind will match. +// Examples: +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} +} + +// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will +// use the originating kind where possible. +func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, src := range kinds { + for _, kind := range v.acceptedGroupKinds { + if kind.Group != src.Group { + continue + } + if len(kind.Kind) > 0 && kind.Kind != src.Kind { + continue + } + return v.target.WithKind(src.Kind), true + } + } + if v.coerce && len(kinds) > 0 { + return v.target.WithKind(kinds[0].Kind), true + } + return schema.GroupVersionKind{}, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go new file mode 100644 index 000000000000..510444a4dec2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// CheckCodec makes sure that the codec can encode objects like internalType, +// decode all of the external types listed, and also decode them into the given +// object. (Will modify internalObject.) (Assumes JSON serialization.) +// TODO: verify that the correct external version is chosen on encode... +func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { + if _, err := Encode(c, internalType); err != nil { + return fmt.Errorf("Internal type not encodable: %v", err) + } + for _, et := range externalTypes { + exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String())) + obj, err := Decode(c, exBytes) + if err != nil { + return fmt.Errorf("external type %s not interpretable: %v", et, err) + } + if reflect.TypeOf(obj) != reflect.TypeOf(internalType) { + return fmt.Errorf("decode of external type %s produced: %#v", et, obj) + } + if err = DecodeInto(c, exBytes, internalType); err != nil { + return fmt.Errorf("external type %s not convertible to internal type: %v", et, err) + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go new file mode 100644 index 000000000000..08d2abfe687d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -0,0 +1,113 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime defines conversions between generic types and structs to map query strings +// to struct objects. +package runtime + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/conversion" +) + +// DefaultMetaV1FieldSelectorConversion auto-accepts metav1 values for name and namespace. +// A cluster scoped resource specifying namespace empty works fine and specifying a particular +// namespace will return no results, as expected. +func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q", label, "metadata.name", "metadata.namespace") + } +} + +// JSONKeyMapper uses the struct tags on a conversion to determine the key value for +// the other side. Use when mapping from a map[string]* to a struct or vice versa. 
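+// For instance (tag values here are illustrative): with destTag `json:"metadata,omitempty"`,
+// JSONKeyMapper("Metadata", sourceTag, destTag) returns ("metadata", "Metadata") - the json
+// tag on the destination decides the key used for that side, and the source key is passed
+// through unchanged.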
+func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { + if s := destTag.Get("json"); len(s) > 0 { + return strings.SplitN(s, ",", 2)[0], key + } + if s := sourceTag.Get("json"); len(s) > 0 { + return key, strings.SplitN(s, ",", 2)[0] + } + return key, key +} + +// DefaultStringConversions are helpers for converting []string and string to real values. +var DefaultStringConversions = []interface{}{ + Convert_Slice_string_To_string, + Convert_Slice_string_To_int, + Convert_Slice_string_To_bool, + Convert_Slice_string_To_int64, +} + +func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { + if len(*input) == 0 { + *out = "" + } + *out = (*input)[0] + return nil +} + +func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.Atoi(str) + if err != nil { + return err + } + *out = i + return nil +} + +// Convert_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value, a value of "false", or a value of "0" resolve to false. +// Any other value (including empty string) resolves to true. +func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { + if len(*input) == 0 { + *out = false + return nil + } + switch strings.ToLower((*input)[0]) { + case "false", "0": + *out = false + default: + *out = true + } + return nil +} + +func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *out = i + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go new file mode 100644 index 000000000000..dff56e03401a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -0,0 +1,805 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + encodingjson "encoding/json" + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "k8s.io/klog" +) + +// UnstructuredConverter is an interface for converting between interface{} +// and map[string]interface representation. 
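+// A typical round trip, sketched (obj is any addressable typed API object):
+//
+//	u, err := DefaultUnstructuredConverter.ToUnstructured(&obj)  // typed -> map[string]interface{}
+//	err = DefaultUnstructuredConverter.FromUnstructured(u, &obj) // map[string]interface{} -> typed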
+type UnstructuredConverter interface { + ToUnstructured(obj interface{}) (map[string]interface{}, error) + FromUnstructured(u map[string]interface{}, obj interface{}) error +} + +type structField struct { + structType reflect.Type + field int +} + +type fieldInfo struct { + name string + nameValue reflect.Value + omitempty bool +} + +type fieldsCacheMap map[structField]*fieldInfo + +type fieldsCache struct { + sync.Mutex + value atomic.Value +} + +func newFieldsCache() *fieldsCache { + cache := &fieldsCache{} + cache.value.Store(make(fieldsCacheMap)) + return cache +} + +var ( + marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem() + unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem() + mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + stringType = reflect.TypeOf(string("")) + int64Type = reflect.TypeOf(int64(0)) + float64Type = reflect.TypeOf(float64(0)) + boolType = reflect.TypeOf(bool(false)) + fieldCache = newFieldsCache() + + // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. + DefaultUnstructuredConverter = &unstructuredConverter{ + mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")), + comparison: conversion.EqualitiesOrDie( + func(a, b time.Time) bool { + return a.UTC() == b.UTC() + }, + ), + } +) + +func parseBool(key string) bool { + if len(key) == 0 { + return false + } + value, err := strconv.ParseBool(key) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + } + return value +} + +// unstructuredConverter knows how to convert between interface{} and +// Unstructured in both ways. +type unstructuredConverter struct { + // If true, we will be additionally running conversion via json + // to ensure that the result is true. + // This is supposed to be set only in tests. + mismatchDetection bool + // comparison is the default test logic used to compare + comparison conversion.Equalities +} + +// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them +// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external +// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. +func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return &unstructuredConverter{ + mismatchDetection: true, + comparison: comparison, + } +} + +// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// It uses encoding/json/Unmarshaler if object implements it or reflection if not. 
+func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) + } + err := fromUnstructured(reflect.ValueOf(u), value.Elem()) + if c.mismatchDetection { + newObj := reflect.New(t.Elem()).Interface() + newErr := fromUnstructuredViaJSON(u, newObj) + if (err != nil) != (newErr != nil) { + klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + } + if err == nil && !c.comparison.DeepEqual(obj, newObj) { + klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + } + } + return err +} + +func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { + data, err := json.Marshal(u) + if err != nil { + return err + } + return json.Unmarshal(data, obj) +} + +func fromUnstructured(sv, dv reflect.Value) error { + sv = unwrapInterface(sv) + if !sv.IsValid() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + st, dt := sv.Type(), dv.Type() + + switch dt.Kind() { + case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface: + // Those require non-trivial conversion. + default: + // This should handle all simple types. + if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + // We cannot simply use "ConvertibleTo", as JSON doesn't support conversions + // between those four groups: bools, integers, floats and string. We need to + // do the same. + if st.ConvertibleTo(dt) { + switch st.Kind() { + case reflect.String: + switch dt.Kind() { + case reflect.String: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Bool: + switch dt.Kind() { + case reflect.Bool: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch dt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Float32, reflect.Float64: + switch dt.Kind() { + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil + } + if sv.Float() == math.Trunc(sv.Float()) { + dv.Set(sv.Convert(dt)) + return nil + } + } + return fmt.Errorf("cannot convert %s to %s", st.String(), dt.String()) + } + } + + // Check if the object has a custom JSON marshaller/unmarshaller. 
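+	// Types that implement json.Unmarshaler (for example metav1.Time or resource.Quantity)
+	// take this branch: the unstructured value is re-encoded to JSON and handed to the type's
+	// own UnmarshalJSON rather than being copied field by field via reflection.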
+ if reflect.PtrTo(dt).Implements(unmarshalerType) { + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler) + return unmarshaler.UnmarshalJSON(data) + } + + switch dt.Kind() { + case reflect.Map: + return mapFromUnstructured(sv, dv) + case reflect.Slice: + return sliceFromUnstructured(sv, dv) + case reflect.Ptr: + return pointerFromUnstructured(sv, dv) + case reflect.Struct: + return structFromUnstructured(sv, dv) + case reflect.Interface: + return interfaceFromUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", dt.Kind()) + } +} + +func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { + fieldCacheMap := fieldCache.value.Load().(fieldsCacheMap) + if info, ok := fieldCacheMap[structField{structType, field}]; ok { + return info + } + + // Cache miss - we need to compute the field name. + info := &fieldInfo{} + typeField := structType.Field(field) + jsonTag := typeField.Tag.Get("json") + if len(jsonTag) == 0 { + // Make the first character lowercase. + if typeField.Name == "" { + info.name = typeField.Name + } else { + info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:] + } + } else { + items := strings.Split(jsonTag, ",") + info.name = items[0] + for i := range items { + if items[i] == "omitempty" { + info.omitempty = true + } + } + } + info.nameValue = reflect.ValueOf(info.name) + + fieldCache.Lock() + defer fieldCache.Unlock() + fieldCacheMap = fieldCache.value.Load().(fieldsCacheMap) + newFieldCacheMap := make(fieldsCacheMap) + for k, v := range fieldCacheMap { + newFieldCacheMap[k] = v + } + newFieldCacheMap[structField{structType, field}] = info + fieldCache.value.Store(newFieldCacheMap) + return info +} + +func unwrapInterface(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Interface { + v = v.Elem() + } + return v +} + +func mapFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore map from %v", st.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() { + if err := fromUnstructured(val, value); err != nil { + return err + } + } else { + value.Set(reflect.Zero(dt.Elem())) + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 { + // We store original []byte representation as string. + // This conversion is allowed, but we need to be careful about + // marshaling data appropriately. + if len(sv.Interface().(string)) > 0 { + marshalled, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st, err) + } + // TODO: Is this Unmarshal needed? 
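+			// The source string holds the base64 text produced when the original []byte was
+			// converted to its unstructured form; marshalling it back to JSON and unmarshalling
+			// into []byte is what performs the base64 decoding here.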
+ var data []byte + err = json.Unmarshal(marshalled, &data) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.SetBytes(data) + } else { + dv.Set(reflect.Zero(dt)) + } + return nil + } + if st.Kind() != reflect.Slice { + return fmt.Errorf("cannot restore slice from %v", st.Kind()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + + if st.Kind() == reflect.Ptr && sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return fromUnstructured(sv.Elem(), dv.Elem()) + default: + return fromUnstructured(sv, dv.Elem()) + } +} + +func structFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore struct from: %v", st.Kind()) + } + + for i := 0; i < dt.NumField(); i++ { + fieldInfo := fieldInfoFromField(dt, i) + fv := dv.Field(i) + + if len(fieldInfo.name) == 0 { + // This field is inlined. + if err := fromUnstructured(sv, fv); err != nil { + return err + } + } else { + value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue)) + if value.IsValid() { + if err := fromUnstructured(value, fv); err != nil { + return err + } + } else { + fv.Set(reflect.Zero(fv.Type())) + } + } + } + return nil +} + +func interfaceFromUnstructured(sv, dv reflect.Value) error { + // TODO: Is this conversion safe? + dv.Set(sv) + return nil +} + +// ToUnstructured converts an object into map[string]interface{} representation. +// It uses encoding/json/Marshaler if object implements it or reflection if not. +func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) { + var u map[string]interface{} + var err error + if unstr, ok := obj.(Unstructured); ok { + u = unstr.UnstructuredContent() + } else { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) + } + u = map[string]interface{}{} + err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem()) + } + if c.mismatchDetection { + newUnstr := map[string]interface{}{} + newErr := toUnstructuredViaJSON(obj, &newUnstr) + if (err != nil) != (newErr != nil) { + klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + } + if err == nil && !c.comparison.DeepEqual(u, newUnstr) { + klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + } + } + if err != nil { + return nil, err + } + return u, nil +} + +// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal() and also int64. +// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { + return DeepCopyJSONValue(x).(map[string]interface{}) +} + +// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal() and also int64. 
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +func DeepCopyJSONValue(x interface{}) interface{} { + switch x := x.(type) { + case map[string]interface{}: + if x == nil { + // Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil + return x + } + clone := make(map[string]interface{}, len(x)) + for k, v := range x { + clone[k] = DeepCopyJSONValue(v) + } + return clone + case []interface{}: + if x == nil { + // Typed nil - an interface{} that contains a type []interface{} with a value of nil + return x + } + clone := make([]interface{}, len(x)) + for i, v := range x { + clone[i] = DeepCopyJSONValue(v) + } + return clone + case string, int64, bool, float64, nil, encodingjson.Number: + return x + default: + panic(fmt.Errorf("cannot deep copy %T", x)) + } +} + +func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + return json.Unmarshal(data, u) +} + +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) { + // Check value receivers if v is not a pointer and pointer receivers if v is a pointer + if v.Type().Implements(marshalerType) { + return v.Interface().(encodingjson.Marshaler), true + } + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + if v.Type().Implements(marshalerType) { + return v.Interface().(encodingjson.Marshaler), true + } + } + return nil, false +} + +func toUnstructured(sv, dv reflect.Value) error { + // Check if the object has a custom JSON marshaller/unmarshaller. + if marshaler, ok := getMarshaler(sv); ok { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + + data, err := marshaler.MarshalJSON() + if err != nil { + return err + } + switch { + case len(data) == 0: + return fmt.Errorf("error decoding from json: empty value") + + case bytes.Equal(data, nullBytes): + // We're done - we don't need to store anything. 
+ + case bytes.Equal(data, trueBytes): + dv.Set(reflect.ValueOf(true)) + + case bytes.Equal(data, falseBytes): + dv.Set(reflect.ValueOf(false)) + + case data[0] == '"': + var result string + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding string from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + case data[0] == '{': + result := make(map[string]interface{}) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding object from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + case data[0] == '[': + result := make([]interface{}, 0) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding array from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + default: + var ( + resultInt int64 + resultFloat float64 + err error + ) + if err = json.Unmarshal(data, &resultInt); err == nil { + dv.Set(reflect.ValueOf(resultInt)) + } else if err = json.Unmarshal(data, &resultFloat); err == nil { + dv.Set(reflect.ValueOf(resultFloat)) + } else { + return fmt.Errorf("error decoding number from json: %v", err) + } + } + + return nil + } + + st, dt := sv.Type(), dv.Type() + switch st.Kind() { + case reflect.String: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(stringType)) + } + dv.Set(reflect.ValueOf(sv.String())) + return nil + case reflect.Bool: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(boolType)) + } + dv.Set(reflect.ValueOf(sv.Bool())) + return nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } + dv.Set(reflect.ValueOf(sv.Int())) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uVal := sv.Uint() + if uVal > math.MaxInt64 { + return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal) + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } + dv.Set(reflect.ValueOf(int64(uVal))) + return nil + case reflect.Float32, reflect.Float64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(float64Type)) + } + dv.Set(reflect.ValueOf(sv.Float())) + return nil + case reflect.Map: + return mapToUnstructured(sv, dv) + case reflect.Slice: + return sliceToUnstructured(sv, dv) + case reflect.Ptr: + return pointerToUnstructured(sv, dv) + case reflect.Struct: + return structToUnstructured(sv, dv) + case reflect.Interface: + return interfaceToUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", st.Kind()) + } +} + +func mapToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + if st.Key().Kind() == reflect.String { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. 
+ } + } + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(sv.MapIndex(key), value); err != nil { + return err + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if st.Elem().Kind() == reflect.Uint8 { + dv.Set(reflect.New(stringType)) + data, err := json.Marshal(sv.Bytes()) + if err != nil { + return err + } + var result string + if err = json.Unmarshal(data, &result); err != nil { + return err + } + dv.Set(reflect.ValueOf(result)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. + dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap())) + dv = dv.Elem() + dt = dv.Type() + } + } + if dt.Kind() != reflect.Slice { + return fmt.Errorf("cannot convert slice to: %v", dt.Kind()) + } + for i := 0; i < sv.Len(); i++ { + if err := toUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerToUnstructured(sv, dv reflect.Value) error { + if sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + return toUnstructured(sv.Elem(), dv) +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Map, reflect.Slice: + // TODO: It seems that 0-len maps are ignored in it. + return v.IsNil() || v.Len() == 0 + case reflect.Ptr, reflect.Interface: + return v.IsNil() + } + return false +} + +func structToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + realMap := dv.Interface().(map[string]interface{}) + + for i := 0; i < st.NumField(); i++ { + fieldInfo := fieldInfoFromField(st, i) + fv := sv.Field(i) + + if fieldInfo.name == "-" { + // This field should be skipped. + continue + } + if fieldInfo.omitempty && isZero(fv) { + // omitempty fields should be ignored. + continue + } + if len(fieldInfo.name) == 0 { + // This field is inlined. 
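+			// Passing the parent map (dv) rather than a fresh value merges the embedded
+			// struct's keys directly into the enclosing object, which is how `json:",inline"`
+			// fields on API types are flattened.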
+ if err := toUnstructured(fv, dv); err != nil { + return err + } + continue + } + switch fv.Type().Kind() { + case reflect.String: + realMap[fieldInfo.name] = fv.String() + case reflect.Bool: + realMap[fieldInfo.name] = fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + realMap[fieldInfo.name] = fv.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + realMap[fieldInfo.name] = fv.Uint() + case reflect.Float32, reflect.Float64: + realMap[fieldInfo.name] = fv.Float() + default: + subv := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(fv, subv); err != nil { + return err + } + dv.SetMapIndex(fieldInfo.nameValue, subv) + } + } + return nil +} + +func interfaceToUnstructured(sv, dv reflect.Value) error { + if !sv.IsValid() || sv.IsNil() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + return toUnstructured(sv.Elem(), dv) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go new file mode 100644 index 000000000000..89feb40103e6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime includes helper functions for working with API objects +// that follow the kubernetes API object conventions, which are: +// +// 0. Your API objects have a common metadata struct member, TypeMeta. +// +// 1. Your code refers to an internal set of API objects. +// +// 2. In a separate package, you have an external set of API objects. +// +// 3. The external set is considered to be versioned, and no breaking +// changes are ever made to it (fields may be added but not changed +// or removed). +// +// 4. As your api evolves, you'll make an additional versioned package +// with every major change. +// +// 5. Versioned packages have conversion functions which convert to +// and from the internal version. +// +// 6. You'll continue to support older versions according to your +// deprecation policy, and you can easily provide a program/library +// to update old versions into new versions because of 5. +// +// 7. All of your serializations and deserializations are handled in a +// centralized place. +// +// Package runtime provides a conversion helper to make 5 easy, and the +// Encode/Decode/DecodeInto trio to accomplish 7. You can also register +// additional "codecs" which use a version of your choice. It's +// recommended that you register your types with runtime in your +// package's init function. +// +// As a bonus, a few common types useful from all api objects and versions +// are provided in types.go. 
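+//
+// As a brief sketch of point 7 (codec, obj and target are placeholders for a real codec
+// and API objects):
+//
+//	data, err := runtime.Encode(codec, obj)       // object -> bytes
+//	obj2, err := runtime.Decode(codec, data)      // bytes -> freshly allocated object
+//	err = runtime.DecodeInto(codec, data, target) // bytes -> existing object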
+package runtime // import "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go new file mode 100644 index 000000000000..db11eb8bcf64 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -0,0 +1,142 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "errors" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type encodable struct { + E Encoder `json:"-"` + obj Object + versions []schema.GroupVersion +} + +func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } +func (e encodable) DeepCopyObject() Object { + out := e + out.obj = e.obj.DeepCopyObject() + copy(out.versions, e.versions) + return out +} + +// NewEncodable creates an object that will be encoded with the provided codec on demand. +// Provided as a convenience for test cases dealing with internal objects. +func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object { + if _, ok := obj.(*Unknown); ok { + return obj + } + return encodable{e, obj, versions} +} + +func (e encodable) UnmarshalJSON(in []byte) error { + return errors.New("runtime.encodable cannot be unmarshalled from JSON") +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (e encodable) MarshalJSON() ([]byte, error) { + return Encode(e.E, e.obj) +} + +// NewEncodableList creates an object that will be encoded with the provided codec on demand. +// Provided as a convenience for test cases dealing with internal objects. +func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object { + out := make([]Object, len(objects)) + for i := range objects { + if _, ok := objects[i].(*Unknown); ok { + out[i] = objects[i] + continue + } + out[i] = NewEncodable(e, objects[i], versions...) + } + return out +} + +func (e *Unknown) UnmarshalJSON(in []byte) error { + if e == nil { + return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer") + } + e.TypeMeta = TypeMeta{} + e.Raw = append(e.Raw[0:0], in...) + e.ContentEncoding = "" + e.ContentType = ContentTypeJSON + return nil +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (e Unknown) MarshalJSON() ([]byte, error) { + // If ContentType is unset, we assume this is JSON. 
+ if e.ContentType != "" && e.ContentType != ContentTypeJSON { + return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data") + } + if e.Raw == nil { + return []byte("null"), nil + } + return e.Raw, nil +} + +func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error { + if in == nil { + out.Raw = []byte("null") + return nil + } + obj := *in + if unk, ok := obj.(*Unknown); ok { + if unk.Raw != nil { + out.Raw = unk.Raw + return nil + } + obj = out.Object + } + if obj == nil { + out.Raw = nil + return nil + } + out.Object = obj + return nil +} + +func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error { + if in.Object != nil { + *out = in.Object + return nil + } + data := in.Raw + if len(data) == 0 || (len(data) == 4 && string(data) == "null") { + *out = nil + return nil + } + *out = &Unknown{ + Raw: data, + // TODO: Set ContentEncoding and ContentType appropriately. + // Currently we set ContentTypeJSON to make tests passing. + ContentType: ContentTypeJSON, + } + return nil +} + +func DefaultEmbeddedConversions() []interface{} { + return []interface{}{ + Convert_runtime_Object_To_runtime_RawExtension, + Convert_runtime_RawExtension_To_runtime_Object, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go new file mode 100644 index 000000000000..322b0313df55 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -0,0 +1,122 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type notRegisteredErr struct { + schemeName string + gvk schema.GroupVersionKind + target GroupVersioner + t reflect.Type +} + +func NewNotRegisteredErrForKind(schemeName string, gvk schema.GroupVersionKind) error { + return ¬RegisteredErr{schemeName: schemeName, gvk: gvk} +} + +func NewNotRegisteredErrForType(schemeName string, t reflect.Type) error { + return ¬RegisteredErr{schemeName: schemeName, t: t} +} + +func NewNotRegisteredErrForTarget(schemeName string, t reflect.Type, target GroupVersioner) error { + return ¬RegisteredErr{schemeName: schemeName, t: t, target: target} +} + +func NewNotRegisteredGVKErrForTarget(schemeName string, gvk schema.GroupVersionKind, target GroupVersioner) error { + return ¬RegisteredErr{schemeName: schemeName, gvk: gvk, target: target} +} + +func (k *notRegisteredErr) Error() string { + if k.t != nil && k.target != nil { + return fmt.Sprintf("%v is not suitable for converting to %q in scheme %q", k.t, k.target, k.schemeName) + } + nullGVK := schema.GroupVersionKind{} + if k.gvk != nullGVK && k.target != nil { + return fmt.Sprintf("%q is not suitable for converting to %q in scheme %q", k.gvk.GroupVersion(), k.target, k.schemeName) + } + if k.t != nil { + return fmt.Sprintf("no kind is registered for the type %v in scheme %q", k.t, k.schemeName) + } + if len(k.gvk.Kind) == 0 { + return fmt.Sprintf("no version %q has been registered in scheme %q", k.gvk.GroupVersion(), k.schemeName) + } + if k.gvk.Version == APIVersionInternal { + return fmt.Sprintf("no kind %q is registered for the internal version of group %q in scheme %q", k.gvk.Kind, k.gvk.Group, k.schemeName) + } + + return fmt.Sprintf("no kind %q is registered for version %q in scheme %q", k.gvk.Kind, k.gvk.GroupVersion(), k.schemeName) +} + +// IsNotRegisteredError returns true if the error indicates the provided +// object or input data is not registered. +func IsNotRegisteredError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*notRegisteredErr) + return ok +} + +type missingKindErr struct { + data string +} + +func NewMissingKindErr(data string) error { + return &missingKindErr{data} +} + +func (k *missingKindErr) Error() string { + return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) +} + +// IsMissingKind returns true if the error indicates that the provided object +// is missing a 'Kind' field. +func IsMissingKind(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingKindErr) + return ok +} + +type missingVersionErr struct { + data string +} + +func NewMissingVersionErr(data string) error { + return &missingVersionErr{data} +} + +func (k *missingVersionErr) Error() string { + return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) +} + +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. +func IsMissingVersion(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingVersionErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go new file mode 100644 index 000000000000..9056397fa517 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/json" + "errors" +) + +func (re *RawExtension) UnmarshalJSON(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(in, []byte("null")) { + re.Raw = append(re.Raw[0:0], in...) + } + return nil +} + +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (re RawExtension) MarshalJSON() ([]byte, error) { + if re.Raw == nil { + // TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which + // expect to call json.Marshal on arbitrary versioned objects (even those not in + // the scheme). pkg/kubectl/resource#AsVersionedObjects and its interaction with + // kubectl get on objects not in the scheme needs to be updated to ensure that the + // objects that are not part of the scheme are correctly put into the right form. + if re.Object != nil { + return json.Marshal(re.Object) + } + return []byte("null"), nil + } + // TODO: Check whether ContentType is actually JSON before returning it. + return re.Raw, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go new file mode 100644 index 000000000000..9b15989c8270 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -0,0 +1,753 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + + It has these top-level messages: + RawExtension + TypeMeta + Unknown +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") +} +func (m *RawExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Raw != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) + } + return i, nil +} + +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} + +func (m *Unknown) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.Raw != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i += copy(dAtA[i:], m.ContentEncoding) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i += copy(dAtA[i:], m.ContentType) + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RawExtension) Size() (n int) { + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Unknown) Size() (n int) { + var l int + _ = l + l = m.TypeMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + 
l = len(m.ContentEncoding) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RawExtension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawExtension{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Unknown) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Unknown{`, + `TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`, + `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RawExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unknown) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unknown: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TypeMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentEncoding = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 
0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto new file mode 100644 index 000000000000..0e212ec941ff --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime; + +// Package-wide variables from generator "generated". +option go_package = "runtime"; + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message RawExtension { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + optional bytes raw = 1; +} + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. 
+// +// +k8s:deepcopy-gen=false +// +protobuf=true +// +k8s:openapi-gen=true +message TypeMeta { + // +optional + optional string apiVersion = 1; + + // +optional + optional string kind = 2; +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=true +// +k8s:openapi-gen=true +message Unknown { + optional TypeMeta typeMeta = 1; + + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + optional bytes raw = 2; + + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + optional string contentEncoding = 3; + + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + optional string contentType = 4; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go new file mode 100644 index 000000000000..33f11eb10d64 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -0,0 +1,212 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" +) + +// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme +} + +var _ ObjectConvertor = unsafeObjectConvertor{} + +// ConvertToVersion converts in to the provided outVersion without copying the input first, which +// is only safe if the output object is not mutated or reused. +func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +// UnsafeObjectConvertor performs object conversion without copying the object structure, +// for use when the converted object will not be reused or mutated. Primarily for use within +// versioned codecs, which use the external object for serialization but do not return it. +func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { + return unsafeObjectConvertor{scheme} +} + +// SetField puts the value of src, into fieldName, which must be a member of v. +// The value of src must be assignable to the field. 
+func SetField(src interface{}, v reflect.Value, fieldName string) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(field.Type()) { + field.Set(srcValue) + return nil + } + if srcValue.Type().ConvertibleTo(field.Type()) { + field.Set(srcValue.Convert(field.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) +} + +// Field puts the value of fieldName, which must be a member of v, into dest, +// which must be a variable to which this field's value can be assigned. +func Field(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + destValue, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + if field.Type().AssignableTo(destValue.Type()) { + destValue.Set(field) + return nil + } + if field.Type().ConvertibleTo(destValue.Type()) { + destValue.Set(field.Convert(destValue.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) +} + +// FieldPtr puts the address of fieldName, which must be a member of v, +// into dest, which must be an address of a variable to which this field's +// address can be assigned. +func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + v, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + field = field.Addr() + if field.Type().AssignableTo(v.Type()) { + v.Set(field) + return nil + } + if field.Type().ConvertibleTo(v.Type()) { + v.Set(field.Convert(v.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) +} + +// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. +// TODO: accept a content type. +func EncodeList(e Encoder, objects []Object) error { + var errs []error + for i := range objects { + data, err := Encode(e, objects[i]) + if err != nil { + errs = append(errs, err) + continue + } + // TODO: Set ContentEncoding and ContentType. + objects[i] = &Unknown{Raw: data} + } + return errors.NewAggregate(errs) +} + +func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { + for _, decoder := range decoders { + // TODO: Decode based on ContentType. + obj, err := Decode(decoder, obj.Raw) + if err != nil { + if IsNotRegisteredError(err) { + continue + } + return nil, err + } + return obj, nil + } + // could not decode, so leave the object as Unknown, but give the decoders the + // chance to set Unknown.TypeMeta if it is available. + for _, decoder := range decoders { + if err := DecodeInto(decoder, obj.Raw, obj); err == nil { + return obj, nil + } + } + return obj, nil +} + +// DecodeList alters the list in place, attempting to decode any objects found in +// the list that have the Unknown type. Any errors that occur are returned +// after the entire list is processed. Decoders are tried in order. 
+func DecodeList(objects []Object, decoders ...Decoder) []error { + errs := []error(nil) + for i, obj := range objects { + switch t := obj.(type) { + case *Unknown: + decoded, err := decodeListItem(t, decoders) + if err != nil { + errs = append(errs, err) + break + } + objects[i] = decoded + } + } + return errs +} + +// MultiObjectTyper returns the types of objects across multiple schemes in order. +type MultiObjectTyper []ObjectTyper + +var _ ObjectTyper = MultiObjectTyper{} + +func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + for _, t := range m { + gvks, unversionedType, err = t.ObjectKinds(obj) + if err == nil { + return + } + } + return +} + +func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + for _, t := range m { + if t.Recognizes(gvk) { + return true + } + } + return false +} + +// SetZeroValue would set the object of objPtr to zero value of its type. +func SetZeroValue(objPtr Object) error { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil +} + +// DefaultFramer is valid for any stream that can read objects serially without +// any separation in the stream. +var DefaultFramer = defaultFramer{} + +type defaultFramer struct{} + +func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } +func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go new file mode 100644 index 000000000000..699ff13e04f3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -0,0 +1,252 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "io" + "net/url" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // APIVersionInternal may be used if you are registering a type that should not + // be considered stable or serialized - it is a convention only and has no + // special behavior in this package. + APIVersionInternal = "__internal" +) + +// GroupVersioner refines a set of possible conversion targets into a single option. +type GroupVersioner interface { + // KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no + // target is known. In general, if the return target is not in the input list, the caller is expected to invoke + // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. + // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. + KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) +} + +// Encoder writes objects to a serialized form +type Encoder interface { + // Encode writes an object to a stream. 
Implementations may return errors if the versions are + // incompatible, or if no conversion is defined. + Encode(obj Object, w io.Writer) error +} + +// Decoder attempts to load an object from data. +type Decoder interface { + // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the + // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and + // version from the serialized data, or an error. If into is non-nil, it will be used as the target type + // and implementations may choose to use it rather than reallocating an object. However, the object is not + // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are + // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the + // type of the into may be used to guide conversion decisions. + Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) +} + +// Serializer is the core interface for transforming objects into a serialized format and back. +// Implementations may choose to perform conversion of the object, but no assumptions should be made. +type Serializer interface { + Encoder + Decoder +} + +// Codec is a Serializer that deals with the details of versioning objects. It offers the same +// interface as Serializer, so this is a marker to consumers that care about the version of the objects +// they receive. +type Codec Serializer + +// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and +// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing +// and the desired version must be specified. +type ParameterCodec interface { + // DecodeParameters takes the given url.Values in the specified group version and decodes them + // into the provided object, or returns an error. + DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error + // EncodeParameters encodes the provided object as query parameters or returns an error. + EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) +} + +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} + +// SerializerInfo contains information about a specific serialization format +type SerializerInfo struct { + // MediaType is the value that represents this serializer over the wire. + MediaType string + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the individual object serializer for this media type. + Serializer Serializer + // PrettySerializer, if set, can serialize this object in a form biased towards + // readability. + PrettySerializer Serializer + // StreamSerializer, if set, describes the streaming serialization format + // for this media type. + StreamSerializer *StreamSerializerInfo +} + +// StreamSerializerInfo contains information about a specific stream serialization format +type StreamSerializerInfo struct { + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. 
+ EncodesAsText bool + // Serializer is the top level object serializer for this type when streaming + Serializer + // Framer is the factory for retrieving streams that separate objects on the wire + Framer +} + +// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers +// for multiple supported media types. This would commonly be accepted by a server component +// that performs HTTP content negotiation to accept multiple formats. +type NegotiatedSerializer interface { + // SupportedMediaTypes is the media types supported for reading and writing single objects. + SupportedMediaTypes() []SerializerInfo + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers +// that can read and write data at rest. This would commonly be used by client tools that must +// read files, or server side storage interfaces that persist restful objects. +type StorageSerializer interface { + // SupportedMediaTypes are the media types supported for reading and writing objects. + SupportedMediaTypes() []SerializerInfo + + // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats + // by introspecting the data at rest. + UniversalDeserializer() Decoder + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// NestedObjectEncoder is an optional interface that objects may implement to be given +// an opportunity to encode any nested Objects / RawExtensions during serialization. +type NestedObjectEncoder interface { + EncodeNestedObjects(e Encoder) error +} + +// NestedObjectDecoder is an optional interface that objects may implement to be given +// an opportunity to decode any nested Objects / RawExtensions during serialization. +type NestedObjectDecoder interface { + DecodeNestedObjects(d Decoder) error +} + +/////////////////////////////////////////////////////////////////////////////// +// Non-codec interfaces + +type ObjectDefaulter interface { + // Default takes an object (must be a pointer) and applies any default values. + // Defaulters may not error. + Default(in Object) +} + +type ObjectVersioner interface { + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) +} + +// ObjectConvertor converts an object to a different version. +type ObjectConvertor interface { + // Convert attempts to convert one object into another, or returns an error. This + // method does not mutate the in object, but the in and out object might share data structures, + // i.e. the out object cannot be mutated without mutating the in object as well. + // The context argument will be passed to all nested conversions. 
+ Convert(in, out, context interface{}) error + // ConvertToVersion takes the provided object and converts it the provided version. This + // method does not mutate the in object, but the in and out object might share data structures, + // i.e. the out object cannot be mutated without mutating the in object as well. + // This method is similar to Convert() but handles specific details of choosing the correct + // output version. + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) +} + +// ObjectTyper contains methods for extracting the APIVersion and Kind +// of objects. +type ObjectTyper interface { + // ObjectKinds returns the all possible group,version,kind of the provided object, true if + // the object is unversioned, or an error if the object is not recognized + // (IsNotRegisteredError will return true). + ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error) + // Recognizes returns true if the scheme is able to handle the provided version and kind, + // or more precisely that the provided version is a possible conversion or decoding + // target. + Recognizes(gvk schema.GroupVersionKind) bool +} + +// ObjectCreater contains methods for instantiating an object by kind and version. +type ObjectCreater interface { + New(kind schema.GroupVersionKind) (out Object, err error) +} + +// ResourceVersioner provides methods for setting and retrieving +// the resource version from an API object. +type ResourceVersioner interface { + SetResourceVersion(obj Object, version string) error + ResourceVersion(obj Object) (string, error) +} + +// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. +type SelfLinker interface { + SetSelfLink(obj Object, selfLink string) error + SelfLink(obj Object) (string, error) + + // Knowing Name is sometimes necessary to use a SelfLinker. + Name(obj Object) (string, error) + // Knowing Namespace is sometimes necessary to use a SelfLinker + Namespace(obj Object) (string, error) +} + +// Object interface must be supported by all API types registered with Scheme. Since objects in a scheme are +// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows +// serializers to set the kind, version, and group the object is represented as. An Object may choose +// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. +type Object interface { + GetObjectKind() schema.ObjectKind + DeepCopyObject() Object +} + +// Unstructured objects store values as map[string]interface{}, with only values that can be serialized +// to JSON allowed. +type Unstructured interface { + Object + // UnstructuredContent returns a non-nil map with this object's contents. Values may be + // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to + // and from JSON. SetUnstructuredContent should be used to mutate the contents. + UnstructuredContent() map[string]interface{} + // SetUnstructuredContent updates the object content to match the provided map. + SetUnstructuredContent(map[string]interface{}) + // IsList returns true if this type is a list or matches the list convention - has an array called "items". + IsList() bool + // EachListItem should pass a single item out of the list as an Object to the provided function. Any + // error should terminate the iteration. 
If IsList() returns false, this method should return an error + // instead of calling the provided function. + EachListItem(func(Object) error) error +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go new file mode 100644 index 000000000000..eeb380c3dc39 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind +// interface if no objects are provided, or the ObjectKind interface of the object in the +// highest array position. +func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { + last := obj.Last() + if last == nil { + return schema.EmptyObjectKind + } + return last.GetObjectKind() +} + +// First returns the leftmost object in the VersionedObjects array, which is usually the +// object as serialized on the wire. +func (obj *VersionedObjects) First() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[0] +} + +// Last is the rightmost object in the VersionedObjects array, which is the object after +// all transformations have been applied. This is the same object that would be returned +// by Decode in a normal invocation (without VersionedObjects in the into argument). +func (obj *VersionedObjects) Last() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[len(obj.Objects)-1] +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go new file mode 100644 index 000000000000..28a61d5fb574 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto + +/* +Package schema is a generated protocol buffer package. + +It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto + +It has these top-level messages: +*/ +package schema + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, + 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, + 0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a, + 0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68, + 0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc, + 0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b, + 0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e, + 0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77, + 0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87, + 0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00, + 0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto new file mode 100644 index 000000000000..5aeeaa100a77 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -0,0 +1,26 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime.schema; + +// Package-wide variables from generator "generated". 
+option go_package = "schema"; + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go new file mode 100644 index 000000000000..4c67ed59801b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -0,0 +1,300 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + "strings" +) + +// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` +func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { + var gvr *GroupVersionResource + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} + } + + return gvr, ParseGroupResource(arg) +} + +// ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` +func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { + var gvk *GroupVersionKind + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvk = &GroupVersionKind{Group: s[2], Version: s[1], Kind: s[0]} + } + + return gvk, ParseGroupKind(arg) +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupResource struct { + Group string + Resource string +} + +func (gr GroupResource) WithVersion(version string) GroupVersionResource { + return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} +} + +func (gr GroupResource) Empty() bool { + return len(gr.Group) == 0 && len(gr.Resource) == 0 +} + +func (gr GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +func ParseGroupKind(gk string) GroupKind { + i := strings.Index(gk, ".") + if i == -1 { + return GroupKind{Kind: gk} + } + + return GroupKind{Group: gk[i+1:], Kind: gk[:i]} +} + +// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed +// for each field. 
+func ParseGroupResource(gr string) GroupResource { + if i := strings.Index(gr, "."); i >= 0 { + return GroupResource{Group: gr[i+1:], Resource: gr[:i]} + } + return GroupResource{Resource: gr} +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionResource struct { + Group string + Version string + Resource string +} + +func (gvr GroupVersionResource) Empty() bool { + return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 +} + +func (gvr GroupVersionResource) GroupResource() GroupResource { + return GroupResource{Group: gvr.Group, Resource: gvr.Resource} +} + +func (gvr GroupVersionResource) GroupVersion() GroupVersion { + return GroupVersion{Group: gvr.Group, Version: gvr.Version} +} + +func (gvr GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupKind struct { + Group string + Kind string +} + +func (gk GroupKind) Empty() bool { + return len(gk.Group) == 0 && len(gk.Kind) == 0 +} + +func (gk GroupKind) WithVersion(version string) GroupVersionKind { + return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} +} + +func (gk GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionKind struct { + Group string + Version string + Kind string +} + +// Empty returns true if group, version, and kind are empty +func (gvk GroupVersionKind) Empty() bool { + return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 +} + +func (gvk GroupVersionKind) GroupKind() GroupKind { + return GroupKind{Group: gvk.Group, Kind: gvk.Kind} +} + +func (gvk GroupVersionKind) GroupVersion() GroupVersion { + return GroupVersion{Group: gvk.Group, Version: gvk.Version} +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion struct { + Group string + Version string +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. It prefers a match to group and version over just group. +// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. 
+// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { + for _, gvk := range kinds { + if gvk.Group == gv.Group && gvk.Version == gv.Version { + return gvk, true + } + } + for _, gvk := range kinds { + if gvk.Group == gv.Group { + return gv.WithKind(gvk.Kind), true + } + } + return GroupVersionKind{}, false +} + +// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. +func ParseGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. + if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{"", gv}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{gv[:i], gv[i+1:]}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. +func (gv GroupVersion) WithKind(kind string) GroupVersionKind { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} +} + +// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. +func (gv GroupVersion) WithResource(resource string) GroupVersionResource { + return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} +} + +// GroupVersions can be used to represent a set of desired group versions. +// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +type GroupVersions []GroupVersion + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. +func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { + var targets []GroupVersionKind + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + targets = append(targets, target) + } + if len(targets) == 1 { + return targets[0], true + } + if len(targets) > 1 { + return bestMatch(kinds, targets), true + } + return GroupVersionKind{}, false +} + +// bestMatch tries to pick best matching GroupVersionKind and falls back to the first +// found if no exact match exists. +func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { + for _, gvk := range targets { + for _, k := range kinds { + if k == gvk { + return k + } + } + } + return targets[0] +} + +// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that +// do not use TypeMeta. +func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk.Empty() { + return "", "" + } + return gvk.GroupVersion().String(), gvk.Kind +} + +// FromAPIVersionAndKind returns a GVK representing the provided fields for types that +// do not use TypeMeta. This method exists to support test types and legacy serializations +// that have a distinct group and kind. +// TODO: further reduce usage of this method. 
+func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { + if gv, err := ParseGroupVersion(apiVersion); err == nil { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} + } + return GroupVersionKind{Kind: kind} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go new file mode 100644 index 000000000000..b5706684587c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// All objects that are serialized from a Scheme encode their type information. This interface is used +// by serialization to set type information from the Scheme onto the serialized version of an object. +// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. +type ObjectKind interface { + // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil + // should clear the current setting. + SetGroupVersionKind(kind GroupVersionKind) + // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does + // not expose or provide these fields. + GroupVersionKind() GroupVersionKind +} + +// EmptyObjectKind implements the ObjectKind interface as a noop +var EmptyObjectKind = emptyObjectKind{} + +type emptyObjectKind struct{} + +// SetGroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} + +// GroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go new file mode 100644 index 000000000000..fd37e293ab1a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -0,0 +1,754 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "net/url" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/naming" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Scheme defines methods for serializing and deserializing API objects, a type +// registry for converting group, version, and kind information to and from Go +// schemas, and mappings between Go schemas of different versions. A scheme is the +// foundation for a versioned API and versioned configuration over time. +// +// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time +// identifier for a particular representation of that Type (typically backwards +// compatible), a Kind is the unique name for that Type within the Version, and a +// Group identifies a set of Versions, Kinds, and Types that evolve over time. An +// Unversioned Type is one that is not yet formally bound to a type and is promised +// to be backwards compatible (effectively a "v1" of a Type that does not expect +// to break in the future). +// +// Schemes are not expected to change at runtime and are only threadsafe after +// registration is complete. +type Scheme struct { + // versionMap allows one to figure out the go type of an object with + // the given version and name. + gvkToType map[schema.GroupVersionKind]reflect.Type + + // typeToGroupVersion allows one to find metadata for a given go object. + // The reflect.Type we index by should *not* be a pointer. + typeToGVK map[reflect.Type][]schema.GroupVersionKind + + // unversionedTypes are transformed without conversion in ConvertToVersion. + unversionedTypes map[reflect.Type]schema.GroupVersionKind + + // unversionedKinds are the names of kinds that can be created in the context of any group + // or version + // TODO: resolve the status of unversioned types. + unversionedKinds map[string]reflect.Type + + // Map from version and resource to the corresponding func to convert + // resource field labels in that version to internal version. + fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc + + // defaulterFuncs is an array of interfaces to be called with an object to provide defaulting + // the provided object must be a pointer. + defaulterFuncs map[reflect.Type]func(interface{}) + + // converter stores all registered conversion functions. It also has + // default converting behavior. + converter *conversion.Converter + + // versionPriority is a map of groups to ordered lists of versions for those groups indicating the + // default priorities of these versions as registered in the scheme + versionPriority map[string][]string + + // observedVersions keeps track of the order we've seen versions during type registration + observedVersions []schema.GroupVersion + + // schemeName is the name of this scheme. If you don't specify a name, the stack of the NewScheme caller will be used. + // This is useful for error reporting to indicate the origin of the scheme. + schemeName string +} + +// FieldLabelConversionFunc converts a field selector to internal representation. +type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error) + +// NewScheme creates a new Scheme. This scheme is pluggable by default. 
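+//
+// A minimal construction sketch (hedged; the group/version "example.dev/v1" and the
+// Widget type are hypothetical and assume Widget implements runtime.Object):
+//
+//	scheme := runtime.NewScheme()
+//	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
+//	scheme.AddKnownTypes(gv, &Widget{})
+//	_ = scheme.SetVersionPriority(gv)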
+func NewScheme() *Scheme { + s := &Scheme{ + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, + fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{}, + defaulterFuncs: map[reflect.Type]func(interface{}){}, + versionPriority: map[string][]string{}, + schemeName: naming.GetNameFromCallsite(internalPackages...), + } + s.converter = conversion.NewConverter(s.nameFunc) + + utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...)) + + // Enable map[string][]string conversions by default + utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...)) + utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) + utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) + return s +} + +// nameFunc returns the name of the type that we wish to use to determine when two types attempt +// a conversion. Defaults to the go name of the type if the type is not registered. +func (s *Scheme) nameFunc(t reflect.Type) string { + // find the preferred names for this type + gvks, ok := s.typeToGVK[t] + if !ok { + return t.Name() + } + + for _, gvk := range gvks { + internalGV := gvk.GroupVersion() + internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in + internalGVK := internalGV.WithKind(gvk.Kind) + + if internalType, exists := s.gvkToType[internalGVK]; exists { + return s.typeToGVK[internalType][0].Kind + } + } + + return gvks[0].Kind +} + +// fromScope gets the input version, desired output version, and desired Scheme +// from a conversion.Scope. +func (s *Scheme) fromScope(scope conversion.Scope) *Scheme { + return s +} + +// Converter allows access to the converter for the scheme +func (s *Scheme) Converter() *conversion.Converter { + return s.converter +} + +// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules. +// Whenever an object of this type is serialized, it is serialized with the provided group version and is not +// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an +// API group and version that would never be updated. +// +// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into +// every version with particular schemas. Resolve this method at that point. +func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { + s.addObservedVersion(version) + s.AddKnownTypes(version, types...) + for _, obj := range types { + t := reflect.TypeOf(obj).Elem() + gvk := version.WithKind(t.Name()) + s.unversionedTypes[t] = gvk + if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old { + panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique in scheme %q", old.PkgPath(), old.Name(), gvk, s.schemeName)) + } + s.unversionedKinds[gvk.Kind] = t + } +} + +// AddKnownTypes registers all types passed in 'types' as being members of version 'version'. +// All objects passed to types should be pointers to structs. The name that go reports for +// the struct becomes the "kind" field when encoding. 
Version may not be empty - use the +// APIVersionInternal constant if you have a type that does not have a formal version. +func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { + s.addObservedVersion(gv) + for _, obj := range types { + t := reflect.TypeOf(obj) + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj) + } +} + +// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should +// be encoded as. Useful for testing when you don't want to make multiple packages to define +// your structs. Version may not be empty - use the APIVersionInternal constant if you have a +// type that does not have a formal version. +func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { + s.addObservedVersion(gvk.GroupVersion()) + t := reflect.TypeOf(obj) + if len(gvk.Version) == 0 { + panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) + } + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + if t.Kind() != reflect.Struct { + panic("All types must be pointers to structs.") + } + + if oldT, found := s.gvkToType[gvk]; found && oldT != t { + panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v in scheme %q", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name(), s.schemeName)) + } + + s.gvkToType[gvk] = t + + for _, existingGvk := range s.typeToGVK[t] { + if existingGvk == gvk { + return + } + } + s.typeToGVK[t] = append(s.typeToGVK[t], gvk) +} + +// KnownTypes returns the types known for the given version. +func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type { + types := make(map[string]reflect.Type) + for gvk, t := range s.gvkToType { + if gv != gvk.GroupVersion() { + continue + } + + types[gvk.Kind] = t + } + return types +} + +// AllKnownTypes returns the all known types. +func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { + return s.gvkToType +} + +// ObjectKinds returns all possible group,version,kind of the go object, true if the +// object is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + // Unstructured objects are always considered to have their declared GVK + if _, ok := obj.(Unstructured); ok { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, false, err + } + t := v.Type() + + gvks, ok := s.typeToGVK[t] + if !ok { + return nil, false, NewNotRegisteredErrForType(s.schemeName, t) + } + _, unversionedType := s.unversionedTypes[t] + + return gvks, unversionedType, nil +} + +// Recognizes returns true if the scheme is able to handle the provided group,version,kind +// of an object. 
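+//
+// For illustration (a hedged sketch; the GroupVersionKind below is an example value):
+//
+//	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
+//	if !scheme.Recognizes(gvk) {
+//		// the kind was never registered via AddKnownTypes / AddKnownTypeWithName
+//	}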
+func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool { + _, exists := s.gvkToType[gvk] + return exists +} + +func (s *Scheme) IsUnversioned(obj Object) (bool, bool) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return false, false + } + t := v.Type() + + if _, ok := s.typeToGVK[t]; !ok { + return false, false + } + _, ok := s.unversionedTypes[t] + return ok, true +} + +// New returns a new API object of the given version and name, or an error if it hasn't +// been registered. The version and kind fields must be specified. +func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { + if t, exists := s.gvkToType[kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + + if t, exists := s.unversionedKinds[kind.Kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + return nil, NewNotRegisteredErrForKind(s.schemeName, kind) +} + +// Log sets a logger on the scheme. For test purposes only +func (s *Scheme) Log(l conversion.DebugLogger) { + s.converter.Debug = l +} + +// AddIgnoredConversionType identifies a pair of types that should be skipped by +// conversion (because the data inside them is explicitly dropped during +// conversion). +func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { + return s.converter.RegisterIgnoredConversion(from, to) +} + +// AddConversionFuncs adds functions to the list of conversion functions. The given +// functions should know how to convert between two of your API objects, or their +// sub-objects. We deduce how to call these functions from the types of their two +// parameters; see the comment for Converter.Register. +// +// Note that, if you need to copy sub-objects that didn't change, you can use the +// conversion.Scope object that will be passed to your conversion function. +// Additionally, all conversions started by Scheme will set the SrcVersion and +// DestVersion fields on the Meta object. Example: +// +// s.AddConversionFuncs( +// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error { +// // You can depend on Meta() being non-nil, and this being set to +// // the source version, e.g., "" +// s.Meta().SrcVersion +// // You can depend on this being set to the destination version, +// // e.g., "v1". +// s.Meta().DestVersion +// // Call scope.Convert to copy sub-fields. +// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0) +// return nil +// }, +// ) +// +// (For more detail about conversion functions, see Converter.Register's comment.) +// +// Also note that the default behavior, if you don't add a conversion function, is to +// sanely copy fields that have the same names and same type names. It's OK if the +// destination type has extra fields, but it must not remove any. So you only need to +// add conversion functions for things with changed/removed fields. +func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { + for _, f := range conversionFuncs { + if err := s.converter.RegisterConversionFunc(f); err != nil { + return err + } + } + return nil +} + +// AddConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. 
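+//
+// A registration sketch (hedged; ExampleV1 and ExampleInternal are hypothetical types
+// and the field copy is only illustrative):
+//
+//	err := scheme.AddConversionFunc((*ExampleV1)(nil), (*ExampleInternal)(nil),
+//		func(a, b interface{}, scope conversion.Scope) error {
+//			in, out := a.(*ExampleV1), b.(*ExampleInternal)
+//			out.Name = in.Name
+//			return nil
+//		})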
+func (s *Scheme) AddConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error { + return s.converter.RegisterUntypedConversionFunc(a, b, fn) +} + +// AddGeneratedConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (s *Scheme) AddGeneratedConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error { + return s.converter.RegisterGeneratedUntypedConversionFunc(a, b, fn) +} + +// AddFieldLabelConversionFunc adds a conversion function to convert field selectors +// of the given kind from the given version to internal version representation. +func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conversionFunc FieldLabelConversionFunc) error { + s.fieldLabelConversionFuncs[gvk] = conversionFunc + return nil +} + +// RegisterInputDefaults sets the provided field mapping function and field matching +// as the defaults for the provided input type. The fn may be nil, in which case no +// mapping will happen by default. Use this method to register a mechanism for handling +// a specific input type in conversion, such as a map[string]string to structs. +func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { + return s.converter.RegisterInputDefaults(in, fn, defaultFlags) +} + +// AddTypeDefaultingFunc registers a function that is passed a pointer to an +// object and can default fields on the object. These functions will be invoked +// when Default() is called. The function will never be called unless the +// defaulted object matches srcType. If this function is invoked twice with the +// same srcType, the fn passed to the later call will be used instead. +func (s *Scheme) AddTypeDefaultingFunc(srcType Object, fn func(interface{})) { + s.defaulterFuncs[reflect.TypeOf(srcType)] = fn +} + +// Default sets defaults on the provided Object. +func (s *Scheme) Default(src Object) { + if fn, ok := s.defaulterFuncs[reflect.TypeOf(src)]; ok { + fn(src) + } +} + +// Convert will attempt to convert in into out. Both must be pointers. For easy +// testing of conversion functions. Returns an error if the conversion isn't +// possible. You can call this with types that haven't been registered (for example, +// a to test conversion of types that are nested within registered types). The +// context interface is passed to the convertor. Convert also supports Unstructured +// types and will convert them intelligently. +func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + unstructuredIn, okIn := in.(Unstructured) + unstructuredOut, okOut := out.(Unstructured) + switch { + case okIn && okOut: + // converting unstructured input to an unstructured output is a straight copy - unstructured + // is a "smart holder" and the contents are passed by reference between the two objects + unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent()) + return nil + + case okOut: + // if the output is an unstructured object, use the standard Go type to unstructured + // conversion. The object must not be internal. 
+ obj, ok := in.(Object) + if !ok { + return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in) + } + gvks, unversioned, err := s.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + + // if no conversion is necessary, convert immediately + if unversioned || gvk.Version != APIVersionInternal { + content, err := DefaultUnstructuredConverter.ToUnstructured(in) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + unstructuredOut.GetObjectKind().SetGroupVersionKind(gvk) + return nil + } + + // attempt to convert the object to an external version first. + target, ok := context.(GroupVersioner) + if !ok { + return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in) + } + // Convert is implicitly unsafe, so we don't need to perform a safe conversion + versioned, err := s.UnsafeConvertToVersion(obj, target) + if err != nil { + return err + } + content, err := DefaultUnstructuredConverter.ToUnstructured(versioned) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + + case okIn: + // converting an unstructured object to any type is modeled by first converting + // the input to a versioned type, then running standard conversions + typed, err := s.unstructuredToTyped(unstructuredIn) + if err != nil { + return err + } + in = typed + } + + flags, meta := s.generateConvertMeta(in) + meta.Context = context + if flags == 0 { + flags = conversion.AllowDifferentFieldTypeNames + } + return s.converter.Convert(in, out, flags, meta) +} + +// ConvertFieldLabel alters the given field label and value for an kind field selector from +// versioned representation to an unversioned one or returns an error. +func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) { + conversionFunc, ok := s.fieldLabelConversionFuncs[gvk] + if !ok { + return DefaultMetaV1FieldSelectorConversion(label, value) + } + return conversionFunc(label, value) +} + +// ConvertToVersion attempts to convert an input object to its matching Kind in another +// version within this scheme. Will return an error if the provided version does not +// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also +// return an error if the conversion does not result in a valid Object being +// returned. Passes target down to the conversion methods as the Context on the scope. +func (s *Scheme) ConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(true, in, target) +} + +// UnsafeConvertToVersion will convert in to the provided target if such a conversion is possible, +// but does not guarantee the output object does not share fields with the input object. It attempts to be as +// efficient as possible when doing conversion. +func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(false, in, target) +} + +// convertToVersion handles conversion with an optional copy. +func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { + var t reflect.Type + + if u, ok := in.(Unstructured); ok { + typed, err := s.unstructuredToTyped(u) + if err != nil { + return nil, err + } + + in = typed + // unstructuredToTyped returns an Object, which must be a pointer to a struct. 
+ t = reflect.TypeOf(in).Elem() + + } else { + // determine the incoming kinds with as few allocations as possible. + t = reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } + } + + kinds, ok := s.typeToGVK[t] + if !ok || len(kinds) == 0 { + return nil, NewNotRegisteredErrForType(s.schemeName, t) + } + + gvk, ok := target.KindForGroupVersionKinds(kinds) + if !ok { + // try to see if this type is listed as unversioned (for legacy support) + // TODO: when we move to server API versions, we should completely remove the unversioned concept + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, in, gvk) + } + return copyAndSetTargetKind(copy, in, unversionedKind) + } + return nil, NewNotRegisteredErrForTarget(s.schemeName, t, target) + } + + // target wants to use the existing type, set kind and return (no conversion necessary) + for _, kind := range kinds { + if gvk == kind { + return copyAndSetTargetKind(copy, in, gvk) + } + } + + // type is unversioned, no conversion necessary + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, in, gvk) + } + return copyAndSetTargetKind(copy, in, unversionedKind) + } + + out, err := s.New(gvk) + if err != nil { + return nil, err + } + + if copy { + in = in.DeepCopyObject() + } + + flags, meta := s.generateConvertMeta(in) + meta.Context = target + if err := s.converter.Convert(in, out, flags, meta); err != nil { + return nil, err + } + + setTargetKind(out, gvk) + return out, nil +} + +// unstructuredToTyped attempts to transform an unstructured object to a typed +// object if possible. It will return an error if conversion is not possible, or the versioned +// Go form of the object. Note that this conversion will lose fields. +func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { + // the type must be something we recognize + gvks, _, err := s.ObjectKinds(in) + if err != nil { + return nil, err + } + typed, err := s.New(gvks[0]) + if err != nil { + return nil, err + } + if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil { + return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err) + } + return typed, nil +} + +// generateConvertMeta constructs the meta value we pass to Convert. +func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { + return s.converter.DefaultMeta(reflect.TypeOf(in)) +} + +// copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. +func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) { + if copy { + obj = obj.DeepCopyObject() + } + setTargetKind(obj, kind) + return obj, nil +} + +// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. 
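+//
+// Behavior sketch (hedged; the GVK values are illustrative):
+//
+//	setTargetKind(obj, schema.GroupVersionKind{Group: "example.dev", Version: "v1", Kind: "Widget"})
+//	// obj now reports example.dev/v1, Kind=Widget
+//
+//	setTargetKind(obj, schema.GroupVersionKind{Version: APIVersionInternal, Kind: "Widget"})
+//	// the internal version is special-cased: the object's GVK is cleared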
+func setTargetKind(obj Object, kind schema.GroupVersionKind) { + if kind.Version == APIVersionInternal { + // internal is a special case + // TODO: look at removing the need to special case this + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + return + } + obj.GetObjectKind().SetGroupVersionKind(kind) +} + +// SetVersionPriority allows specifying a precise order of priority. All specified versions must be in the same group, +// and the specified order overwrites any previously specified order for this group +func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error { + groups := sets.String{} + order := []string{} + for _, version := range versions { + if len(version.Version) == 0 || version.Version == APIVersionInternal { + return fmt.Errorf("internal versions cannot be prioritized: %v", version) + } + + groups.Insert(version.Group) + order = append(order, version.Version) + } + if len(groups) != 1 { + return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", ")) + } + + s.versionPriority[groups.List()[0]] = order + return nil +} + +// PrioritizedVersionsForGroup returns versions for a single group in priority order +func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion { + ret := []schema.GroupVersion{} + for _, version := range s.versionPriority[group] { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + } + for _, observedVersion := range s.observedVersions { + if observedVersion.Group != group { + continue + } + found := false + for _, existing := range ret { + if existing == observedVersion { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + + return ret +} + +// PrioritizedVersionsAllGroups returns all known versions in their priority order. Groups are random, but +// versions for a single group are prioritized +func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for group, versions := range s.versionPriority { + for _, version := range versions { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + } + } + for _, observedVersion := range s.observedVersions { + found := false + for _, existing := range ret { + if existing == observedVersion { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + return ret +} + +// PreferredVersionAllGroups returns the most preferred version for every group. +// group ordering is random. 
+func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for group, versions := range s.versionPriority { + for _, version := range versions { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + break + } + } + for _, observedVersion := range s.observedVersions { + found := false + for _, existing := range ret { + if existing.Group == observedVersion.Group { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + + return ret +} + +// IsGroupRegistered returns true if types for the group have been registered with the scheme +func (s *Scheme) IsGroupRegistered(group string) bool { + for _, observedVersion := range s.observedVersions { + if observedVersion.Group == group { + return true + } + } + return false +} + +// IsVersionRegistered returns true if types for the version have been registered with the scheme +func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool { + for _, observedVersion := range s.observedVersions { + if observedVersion == version { + return true + } + } + + return false +} + +func (s *Scheme) addObservedVersion(version schema.GroupVersion) { + if len(version.Version) == 0 || version.Version == APIVersionInternal { + return + } + for _, observedVersion := range s.observedVersions { + if observedVersion == version { + return + } + } + + s.observedVersions = append(s.observedVersions, version) +} + +func (s *Scheme) Name() string { + return s.schemeName +} + +// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common +// call chains to NewReflector, so they'd be low entropy names for reflectors +var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go new file mode 100644 index 000000000000..944db48182bd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// SchemeBuilder collects functions that add things to a scheme. It's to allow +// code to compile without explicitly referencing generated types. You should +// declare one in each package that will have generated deep copy or conversion +// functions. +type SchemeBuilder []func(*Scheme) error + +// AddToScheme applies all the stored functions to the scheme. A non-nil error +// indicates that one function failed and the attempt was abandoned. +func (sb *SchemeBuilder) AddToScheme(s *Scheme) error { + for _, f := range *sb { + if err := f(s); err != nil { + return err + } + } + return nil +} + +// Register adds a scheme setup function to the list. +func (sb *SchemeBuilder) Register(funcs ...func(*Scheme) error) { + for _, f := range funcs { + *sb = append(*sb, f) + } +} + +// NewSchemeBuilder calls Register for you. 
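+//
+// A typical registration pattern built on this helper might look like the following
+// (a hedged sketch; addKnownTypes, Widget, and the group/version are hypothetical):
+//
+//	var (
+//		SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+//		AddToScheme   = SchemeBuilder.AddToScheme
+//	)
+//
+//	func addKnownTypes(s *runtime.Scheme) error {
+//		s.AddKnownTypes(schema.GroupVersion{Group: "example.dev", Version: "v1"}, &Widget{})
+//		return nil
+//	}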
+func NewSchemeBuilder(funcs ...func(*Scheme) error) SchemeBuilder { + var sb SchemeBuilder + sb.Register(funcs...) + return sb +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go new file mode 100644 index 000000000000..5bc642bc8e9e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "io" + "reflect" + "strings" +) + +// Pair of strings. We keed the name of fields and the doc +type Pair struct { + Name, Doc string +} + +// KubeTypes is an array to represent all available types in a parsed file. [0] is for the type itself +type KubeTypes []Pair + +func astFrom(filePath string) *doc.Package { + fset := token.NewFileSet() + m := make(map[string]*ast.File) + + f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) + if err != nil { + fmt.Println(err) + return nil + } + + m[filePath] = f + apkg, _ := ast.NewPackage(fset, m, nil, nil) + + return doc.New(apkg, "", 0) +} + +func fmtRawDoc(rawDoc string) string { + var buffer bytes.Buffer + delPrevChar := func() { + if buffer.Len() > 0 { + buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n" + } + } + + // Ignore all lines after --- + rawDoc = strings.Split(rawDoc, "---")[0] + + for _, line := range strings.Split(rawDoc, "\n") { + line = strings.TrimRight(line, " ") + leading := strings.TrimLeft(line, " ") + switch { + case len(line) == 0: // Keep paragraphs + delPrevChar() + buffer.WriteString("\n\n") + case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs + case strings.HasPrefix(leading, "+"): // Ignore instructions to the generators + default: + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + delPrevChar() + line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-someting..." 
+ } else { + line += " " + } + buffer.WriteString(line) + } + } + + postDoc := strings.TrimRight(buffer.String(), "\n") + postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " + postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " + postDoc = strings.Replace(postDoc, "\n", "\\n", -1) + postDoc = strings.Replace(postDoc, "\t", "\\t", -1) + + return postDoc +} + +// fieldName returns the name of the field as it should appear in JSON format +// "-" indicates that this field is not part of the JSON representation +func fieldName(field *ast.Field) string { + jsonTag := "" + if field.Tag != nil { + jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation + if strings.Contains(jsonTag, "inline") { + return "-" + } + } + + jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-" + if jsonTag == "" { + if field.Names != nil { + return field.Names[0].Name + } + return field.Type.(*ast.Ident).Name + } + return jsonTag +} + +// A buffer of lines that will be written. +type bufferedLine struct { + line string + indentation int +} + +type buffer struct { + lines []bufferedLine +} + +func newBuffer() *buffer { + return &buffer{ + lines: make([]bufferedLine, 0), + } +} + +func (b *buffer) addLine(line string, indent int) { + b.lines = append(b.lines, bufferedLine{line, indent}) +} + +func (b *buffer) flushLines(w io.Writer) error { + for _, line := range b.lines { + indentation := strings.Repeat("\t", line.indentation) + fullLine := fmt.Sprintf("%s%s", indentation, line.line) + if _, err := io.WriteString(w, fullLine); err != nil { + return err + } + } + return nil +} + +func writeFuncHeader(b *buffer, structName string, indent int) { + s := fmt.Sprintf("var map_%s = map[string]string {\n", structName) + b.addLine(s, indent) +} + +func writeFuncFooter(b *buffer, structName string, indent int) { + b.addLine("}\n", indent) // Closes the map definition + + s := fmt.Sprintf("func (%s) SwaggerDoc() map[string]string {\n", structName) + b.addLine(s, indent) + s = fmt.Sprintf("return map_%s\n", structName) + b.addLine(s, indent+1) + b.addLine("}\n", indent) // Closes the function definition +} + +func writeMapBody(b *buffer, kubeType []Pair, indent int) { + format := "\"%s\": \"%s\",\n" + for _, pair := range kubeType { + s := fmt.Sprintf(format, pair.Name, pair.Doc) + b.addLine(s, indent+2) + } +} + +// ParseDocumentationFrom gets all types' documentation and returns them as an +// array. Each type is again represented as an array (we have to use arrays as we +// need to be sure for the order of the fields). This function returns fields and +// struct definitions that have no documentation as {name, ""}. +func ParseDocumentationFrom(src string) []KubeTypes { + var docForTypes []KubeTypes + + pkg := astFrom(src) + + for _, kubType := range pkg.Types { + if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok { + var ks KubeTypes + ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc)}) + + for _, field := range structType.Fields.List { + if n := fieldName(field); n != "-" { + fieldDoc := fmtRawDoc(field.Doc.Text()) + ks = append(ks, Pair{n, fieldDoc}) + } + } + docForTypes = append(docForTypes, ks) + } + } + + return docForTypes +} + +// WriteSwaggerDocFunc writes a declaration of a function as a string. 
This function is used in +// Swagger as a documentation source for structs and theirs fields +func WriteSwaggerDocFunc(kubeTypes []KubeTypes, w io.Writer) error { + for _, kubeType := range kubeTypes { + structName := kubeType[0].Name + kubeType[0].Name = "" + + // Ignore empty documentation + docfulTypes := make(KubeTypes, 0, len(kubeType)) + for _, pair := range kubeType { + if pair.Doc != "" { + docfulTypes = append(docfulTypes, pair) + } + } + + if len(docfulTypes) == 0 { + continue // If no fields and the struct have documentation, skip the function definition + } + + indent := 0 + buffer := newBuffer() + + writeFuncHeader(buffer, structName, indent) + writeMapBody(buffer, docfulTypes, indent) + writeFuncFooter(buffer, structName, indent) + buffer.addLine("\n", 0) + + if err := buffer.flushLines(w); err != nil { + return err + } + } + + return nil +} + +// VerifySwaggerDocsExist writes in a io.Writer a list of structs and fields that +// are missing of documentation. +func VerifySwaggerDocsExist(kubeTypes []KubeTypes, w io.Writer) (int, error) { + missingDocs := 0 + buffer := newBuffer() + + for _, kubeType := range kubeTypes { + structName := kubeType[0].Name + if kubeType[0].Doc == "" { + format := "Missing documentation for the struct itself: %s\n" + s := fmt.Sprintf(format, structName) + buffer.addLine(s, 0) + missingDocs++ + } + kubeType = kubeType[1:] // Skip struct definition + + for _, pair := range kubeType { // Iterate only the fields + if pair.Doc == "" { + format := "In struct: %s, field documentation is missing: %s\n" + s := fmt.Sprintf(format, structName, pair.Name) + buffer.addLine(s, 0) + missingDocs++ + } + } + } + + if err := buffer.flushLines(w); err != nil { + return -1, err + } + return missingDocs, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go new file mode 100644 index 000000000000..e4515d8ed008 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -0,0 +1,137 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// Note that the types provided in this file are not versioned and are intended to be +// safe to use from within all versions of every API object. + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. 
+// +// +k8s:deepcopy-gen=false +// +protobuf=true +// +k8s:openapi-gen=true +type TypeMeta struct { + // +optional + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // +optional + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` +} + +const ( + ContentTypeJSON string = "application/json" +) + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type RawExtension struct { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + Raw []byte `protobuf:"bytes,1,opt,name=raw"` + // Object can hold a representation of this extension - useful for working with versioned + // structs. + Object Object `json:"-"` +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=true +// +k8s:openapi-gen=true +type Unknown struct { + TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + Raw []byte `protobuf:"bytes,2,opt,name=raw"` + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. 
+ ContentType string `protobuf:"bytes,4,opt,name=contentType"` +} + +// VersionedObjects is used by Decoders to give callers a way to access all versions +// of an object during the decoding process. +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type VersionedObjects struct { + // Objects is the set of objects retrieved during decoding, in order of conversion. + // The 0 index is the object as serialized on the wire. If conversion has occurred, + // other objects may be present. The right most object is the same as would be returned + // by a normal Decode call. + Objects []Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go new file mode 100644 index 000000000000..ead96ee05543 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" +) + +type ProtobufMarshaller interface { + MarshalTo(data []byte) (int, error) +} + +// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown +// that will contain an object that implements ProtobufMarshaller. +func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + + if b != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, size) + n2, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n2) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + } + i += n2 + } + + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go new file mode 100644 index 000000000000..8b9182f359d4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -0,0 +1,108 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package runtime + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RawExtension) DeepCopyInto(out *RawExtension) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Object != nil { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawExtension. +func (in *RawExtension) DeepCopy() *RawExtension { + if in == nil { + return nil + } + out := new(RawExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unknown) DeepCopyInto(out *Unknown) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unknown. +func (in *Unknown) DeepCopy() *Unknown { + if in == nil { + return nil + } + out := new(Unknown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *Unknown) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. +func (in *VersionedObjects) DeepCopy() *VersionedObjects { + if in == nil { + return nil + } + out := new(VersionedObjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *VersionedObjects) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go new file mode 100644 index 000000000000..298f798c4379 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/selection/operator.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selection + +// Operator represents a key/field's relationship to value(s). +// See labels.Requirement and fields.Requirement for more details. +type Operator string + +const ( + DoesNotExist Operator = "!" + Equals Operator = "=" + DoubleEquals Operator = "==" + In Operator = "in" + NotEquals Operator = "!=" + NotIn Operator = "notin" + Exists Operator = "exists" + GreaterThan Operator = "gt" + LessThan Operator = "lt" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go new file mode 100644 index 000000000000..5667fa99212c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types implements various generic types used throughout kubernetes. +package types // import "k8s.io/apimachinery/pkg/types" diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go new file mode 100644 index 000000000000..88f0de36db3a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" +) + +// NamespacedName comprises a resource name, with a mandatory namespace, +// rendered as "/". Being a type captures intent and +// helps make sure that UIDs, namespaced names and non-namespaced names +// do not get conflated in code. For most use cases, namespace and name +// will already have been format validated at the API entry point, so we +// don't do that here. Where that's not the case (e.g. in testing), +// consider using NamespacedNameOrDie() in testing.go in this package. + +type NamespacedName struct { + Namespace string + Name string +} + +const ( + Separator = '/' +) + +// String returns the general purpose string representation +func (n NamespacedName) String() string { + return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) +} diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go new file mode 100644 index 000000000000..fee348d7e76f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// NodeName is a type that holds a api.Node's Name identifier. +// Being a type captures intent and helps make sure that the node name +// is not confused with similar concepts (the hostname, the cloud provider id, +// the cloud provider name etc) +// +// To clarify the various types: +// +// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. +// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. +// +// * Hostname is the hostname of the local machine (from uname -n). +// However, some components allow the user to pass in a --hostname-override flag, +// which will override this in most places. In the absence of anything more meaningful, +// kubelet will use Hostname as the Node.Name when it creates the Node. +// +// * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. +// +// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the +// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up +// to the cloudprovider how to do this mapping. +// +// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the +// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if +// we are using a custom DHCP domain it won't be. +type NodeName string diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go new file mode 100644 index 000000000000..d522d1dbdc66 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// Similarly to above, these are constants to support HTTP PATCH utilized by +// both the client and server that didn't make sense for a whole package to be +// dedicated to. +type PatchType string + +const ( + JSONPatchType PatchType = "application/json-patch+json" + MergePatchType PatchType = "application/merge-patch+json" + StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go new file mode 100644 index 000000000000..869339222e96 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/uid.go @@ -0,0 +1,22 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// UID is a type that holds unique ID values, including UUIDs. Because we +// don't ONLY use UUIDs, this is an alias to string. Being a type captures +// intent and helps make sure that UIDs and names do not get conflated. +type UID string diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go new file mode 100644 index 000000000000..5d4d6250a316 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors implements various utility functions and types around errors. +package errors // import "k8s.io/apimachinery/pkg/util/errors" diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go new file mode 100644 index 000000000000..88e937679da3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -0,0 +1,201 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" +) + +// MessageCountMap contains occurrence for each error message. +type MessageCountMap map[string]int + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +type Aggregate interface { + error + Errors() []error +} + +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). 
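As a minimal sketch of how a consumer might use the Aggregate and NewAggregate helpers being vendored here (the import alias and the example error strings are illustrative, not taken from this patch):

```go
package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// nil entries are filtered out; an empty or all-nil slice yields a nil Aggregate.
	agg := utilerrors.NewAggregate([]error{
		errors.New("pod not found"),
		nil,
		errors.New("service unreachable"),
	})
	fmt.Println(agg)               // [pod not found, service unreachable]
	fmt.Println(len(agg.Errors())) // 2
}
```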
+func NewAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. + return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + result := fmt.Sprintf("[%s", agg[0].Error()) + for i := 1; i < len(agg); i++ { + result += fmt.Sprintf(", %s", agg[i].Error()) + } + result += "]" + return result +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Matcher is used to match errors. Returns true if the error matches. +type Matcher func(error) bool + +// FilterOut removes all errors that match any of the matchers from the input +// error. If the input is a singular error, only that error is tested. If the +// input implements the Aggregate interface, the list of errors will be +// processed recursively. +// +// This can be used, for example, to remove known-OK errors (such as io.EOF or +// os.PathNotFound) from a list of errors. +func FilterOut(err error, fns ...Matcher) error { + if err == nil { + return nil + } + if agg, ok := err.(Aggregate); ok { + return NewAggregate(filterErrors(agg.Errors(), fns...)) + } + if !matchesError(err, fns...) { + return err + } + return nil +} + +// matchesError returns true if any Matcher returns true +func matchesError(err error, fns ...Matcher) bool { + for _, fn := range fns { + if fn(err) { + return true + } + } + return false +} + +// filterErrors returns any errors (or nested errors, if the list contains +// nested Errors) for which all fns return false. If no errors +// remain a nil list is returned. The resulting silec will have all +// nested slices flattened as a side effect. +func filterErrors(list []error, fns ...Matcher) []error { + result := []error{} + for _, err := range list { + r := FilterOut(err, fns...) + if r != nil { + result = append(result, r) + } + } + return result +} + +// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func Flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := Flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return NewAggregate(result) +} + +// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate +func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { + if m == nil { + return nil + } + result := make([]error, 0, len(m)) + for errStr, count := range m { + var countStr string + if count > 1 { + countStr = fmt.Sprintf(" (repeated %v times)", count) + } + result = append(result, fmt.Errorf("%v%v", errStr, countStr)) + } + return NewAggregate(result) +} + +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. 
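A short, hypothetical illustration of FilterOut and Reduce working together; io.EOF stands in for a "known-OK" error that a caller might want to drop:

```go
package main

import (
	"errors"
	"fmt"
	"io"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{io.EOF, errors.New("disk full")})

	// Drop the expected io.EOF; only "disk full" remains in the Aggregate.
	remaining := utilerrors.FilterOut(agg, func(err error) bool { return err == io.EOF })

	// An Aggregate with a single element reduces to that underlying error.
	fmt.Println(utilerrors.Reduce(remaining)) // disk full
}
```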
+func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// AggregateGoroutines runs the provided functions in parallel, stuffing all +// non-nil errors into the returned Aggregate. +// Returns nil if all the functions complete successfully. +func AggregateGoroutines(funcs ...func() error) Aggregate { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { errChan <- f() }(f) + } + errs := make([]error, 0) + for i := 0; i < cap(errChan); i++ { + if err := <-errChan; err != nil { + errs = append(errs, err) + } + } + return NewAggregate(errs) +} + +// ErrPreconditionViolated is returned when the precondition is violated +var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go new file mode 100644 index 000000000000..48dd7d9c551d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -0,0 +1,362 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto + +/* + Package intstr is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto + + It has these top-level messages: + IntOrString +*/ +package intstr + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") +} +func (m *IntOrString) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i += copy(dAtA[i:], m.StrVal) + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *IntOrString) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntVal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 
0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto new file mode 100644 index 000000000000..e79fb9e57266 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go new file mode 100644 index 000000000000..5b26ed262631 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -0,0 +1,184 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package intstr + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "runtime/debug" + "strconv" + "strings" + + "github.com/google/gofuzz" + "k8s.io/klog" +) + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +type IntOrString struct { + Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` + IntVal int32 `protobuf:"varint,2,opt,name=intVal"` + StrVal string `protobuf:"bytes,3,opt,name=strVal"` +} + +// Type represents the stored type of IntOrString. +type Type int + +const ( + Int Type = iota // The IntOrString holds an int. + String // The IntOrString holds a string. +) + +// FromInt creates an IntOrString object with an int32 value. It is +// your responsibility not to call this method with a value greater +// than int32. +// TODO: convert to (val int32) +func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } + return IntOrString{Type: Int, IntVal: int32(val)} +} + +// FromString creates an IntOrString object with a string value. +func FromString(val string) IntOrString { + return IntOrString{Type: String, StrVal: val} +} + +// Parse the given string and try to convert it to an integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.Atoi(val) + if err != nil { + return FromString(val) + } + return FromInt(i) +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (intstr *IntOrString) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + intstr.Type = String + return json.Unmarshal(value, &intstr.StrVal) + } + intstr.Type = Int + return json.Unmarshal(value, &intstr.IntVal) +} + +// String returns the string value, or the Itoa of the int value. +func (intstr *IntOrString) String() string { + if intstr.Type == String { + return intstr.StrVal + } + return strconv.Itoa(intstr.IntValue()) +} + +// IntValue returns the IntVal if type Int, or if +// it is a String, will attempt a conversion to int. +func (intstr *IntOrString) IntValue() int { + if intstr.Type == String { + i, _ := strconv.Atoi(intstr.StrVal) + return i + } + return int(intstr.IntVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (intstr IntOrString) MarshalJSON() ([]byte, error) { + switch intstr.Type { + case Int: + return json.Marshal(intstr.IntVal) + case String: + return json.Marshal(intstr.StrVal) + default: + return []byte{}, fmt.Errorf("impossible IntOrString.Type") + } +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
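To make the marshalling behaviour concrete, a small self-contained sketch follows; the probe struct and the port values are invented for illustration, and GetValueFromIntOrPercent is defined further down in this same file:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

type probe struct {
	Port intstr.IntOrString `json:"port"`
}

func main() {
	// A numeric and a string JSON value both decode into the same field.
	var byNumber, byName probe
	_ = json.Unmarshal([]byte(`{"port": 8080}`), &byNumber)
	_ = json.Unmarshal([]byte(`{"port": "metrics"}`), &byName)
	fmt.Println(byNumber.Port.IntValue(), byName.Port.String()) // 8080 metrics

	// Marshalling emits the inner value, not a wrapper object.
	out, _ := json.Marshal(probe{Port: intstr.FromInt(9090)})
	fmt.Println(string(out)) // {"port":9090}

	// Percentage strings are resolved against a total, rounding up here.
	pct := intstr.FromString("25%")
	n, _ := intstr.GetValueFromIntOrPercent(&pct, 10, true)
	fmt.Println(n) // 3
}
```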
+func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } + +func (intstr *IntOrString) Fuzz(c fuzz.Continue) { + if intstr == nil { + return + } + if c.RandBool() { + intstr.Type = Int + c.Fuzz(&intstr.IntVal) + intstr.StrVal = "" + } else { + intstr.Type = String + intstr.IntVal = 0 + c.Fuzz(&intstr.StrVal) + } +} + +func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { + if intOrPercent == nil { + return &defaultValue + } + return intOrPercent +} + +func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValue(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + s := strings.Replace(intOrStr.StrVal, "%", "", -1) + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), true, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go new file mode 100644 index 000000000000..10c8cb837ed5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -0,0 +1,119 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json + +import ( + "bytes" + "encoding/json" + "io" +) + +// NewEncoder delegates to json.NewEncoder +// It is only here so this package can be a drop-in for common encoding/json uses +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal delegates to json.Marshal +// It is only here so this package can be a drop-in for common encoding/json uses +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v) + + case *[]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertSliceNumbers(*v) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}) error { + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}) error { + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go new file mode 100644 index 000000000000..2965d5a8bc52 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package naming + +import ( + "fmt" + "regexp" + goruntime "runtime" + "runtime/debug" + "strconv" + "strings" +) + +// GetNameFromCallsite walks back through the call stack until we find a caller from outside of the ignoredPackages +// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging +func GetNameFromCallsite(ignoredPackages ...string) string { + name := "????" + const maxStack = 10 + for i := 1; i < maxStack; i++ { + _, file, line, ok := goruntime.Caller(i) + if !ok { + file, line, ok = extractStackCreator() + if !ok { + break + } + i += maxStack + } + if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) { + continue + } + + file = trimPackagePrefix(file) + name = fmt.Sprintf("%s:%d", file, line) + break + } + return name +} + +// hasPackage returns true if the file is in one of the ignored packages. +func hasPackage(file string, ignoredPackages []string) bool { + for _, ignoredPackage := range ignoredPackages { + if strings.Contains(file, ignoredPackage) { + return true + } + } + return false +} + +// trimPackagePrefix reduces duplicate values off the front of a package name. +func trimPackagePrefix(file string) string { + if l := strings.LastIndex(file, "/vendor/"); l >= 0 { + return file[l+len("/vendor/"):] + } + if l := strings.LastIndex(file, "/src/"); l >= 0 { + return file[l+5:] + } + if l := strings.LastIndex(file, "/pkg/"); l >= 0 { + return file[l+1:] + } + return file +} + +var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`) + +// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false +// if the creator cannot be located. +// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440 +func extractStackCreator() (string, int, bool) { + stack := debug.Stack() + matches := stackCreator.FindStringSubmatch(string(stack)) + if matches == nil || len(matches) != 4 { + return "", 0, false + } + line, err := strconv.Atoi(matches[3]) + if err != nil { + return "", 0, false + } + return matches[2], line, true +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go new file mode 100644 index 000000000000..078f00d9b979 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -0,0 +1,445 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + + "golang.org/x/net/http2" + "k8s.io/klog" +) + +// JoinPreservingTrailingSlash does a path.Join of the specified elements, +// preserving any trailing slash on the last non-empty segment +func JoinPreservingTrailingSlash(elem ...string) string { + // do the basic path join + result := path.Join(elem...) + + // find the last non-empty segment + for i := len(elem) - 1; i >= 0; i-- { + if len(elem[i]) > 0 { + // if the last segment ended in a slash, ensure our result does as well + if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") { + result += "/" + } + break + } + } + + return result +} + +// IsProbableEOF returns true if the given error resembles a connection termination +// scenario that would justify assuming that the watch is empty. +// These errors are what the Go http stack returns back to us which are general +// connection closure errors (strongly correlated) and callers that need to +// differentiate probable errors in connection behavior between normal "this is +// disconnected" should use the method. +func IsProbableEOF(err error) bool { + if err == nil { + return false + } + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + msg := err.Error() + switch { + case err == io.EOF: + return true + case msg == "http: can't write HTTP request on broken connection": + return true + case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): + return true + case strings.Contains(msg, "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(msg), "use of closed network connection"): + return true + } + return false +} + +var defaultTransport = http.DefaultTransport.(*http.Transport) + +// SetOldTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetOldTransportDefaults(t *http.Transport) *http.Transport { + if t.Proxy == nil || isDefault(t.Proxy) { + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + // If no custom dialer is set, use the default context dialer + if t.DialContext == nil && t.Dial == nil { + t.DialContext = defaultTransport.DialContext + } + if t.TLSHandshakeTimeout == 0 { + t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout + } + return t +} + +// SetTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetTransportDefaults(t *http.Transport) *http.Transport { + t = SetOldTransportDefaults(t) + // Allow clients to disable http2 if needed. 
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { + klog.Infof("HTTP2 has been explicitly disabled") + } else { + if err := http2.ConfigureTransport(t); err != nil { + klog.Warningf("Transport failed http2 configuration: %v", err) + } + } + return t +} + +type RoundTripperWrapper interface { + http.RoundTripper + WrappedRoundTripper() http.RoundTripper +} + +type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error) + +func DialerFor(transport http.RoundTripper) (DialFunc, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + // transport.DialContext takes precedence over transport.Dial + if transport.DialContext != nil { + return transport.DialContext, nil + } + // adapt transport.Dial to the DialWithContext signature + if transport.Dial != nil { + return func(ctx context.Context, net, addr string) (net.Conn, error) { + return transport.Dial(net, addr) + }, nil + } + // otherwise return nil + return nil, nil + case RoundTripperWrapper: + return DialerFor(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %T", transport) + } +} + +type TLSClientConfigHolder interface { + TLSClientConfig() *tls.Config +} + +func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.TLSClientConfig, nil + case TLSClientConfigHolder: + return transport.TLSClientConfig(), nil + case RoundTripperWrapper: + return TLSClientConfig(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %T", transport) + } +} + +func FormatURL(scheme string, host string, port int, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + Path: path, + } +} + +func GetHTTPClient(req *http.Request) string { + if ua := req.UserAgent(); len(ua) != 0 { + return ua + } + return "unknown" +} + +// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr, +// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid. +func SourceIPs(req *http.Request) []net.IP { + hdr := req.Header + // First check the X-Forwarded-For header for requests via proxy. + hdrForwardedFor := hdr.Get("X-Forwarded-For") + forwardedForIPs := []net.IP{} + if hdrForwardedFor != "" { + // X-Forwarded-For can be a csv of IPs in case of multiple proxies. + // Use the first valid one. + parts := strings.Split(hdrForwardedFor, ",") + for _, part := range parts { + ip := net.ParseIP(strings.TrimSpace(part)) + if ip != nil { + forwardedForIPs = append(forwardedForIPs, ip) + } + } + } + if len(forwardedForIPs) > 0 { + return forwardedForIPs + } + + // Try the X-Real-Ip header. + hdrRealIp := hdr.Get("X-Real-Ip") + if hdrRealIp != "" { + ip := net.ParseIP(hdrRealIp) + if ip != nil { + return []net.IP{ip} + } + } + + // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. + // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err == nil { + if remoteIP := net.ParseIP(host); remoteIP != nil { + return []net.IP{remoteIP} + } + } + + // Fallback if Remote Address was just IP. 
+ if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil { + return []net.IP{remoteIP} + } + + return nil +} + +// Extracts and returns the clients IP from the given request. +// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. +// Returns nil if none of them are set or is set to an invalid value. +func GetClientIP(req *http.Request) net.IP { + ips := SourceIPs(req) + if len(ips) == 0 { + return nil + } + return ips[0] +} + +// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's +// IP address to the X-Forwarded-For chain. +func AppendForwardedForHeader(req *http.Request) { + // Copied from net/http/httputil/reverseproxy.go: + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. + if prior, ok := req.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + req.Header.Set("X-Forwarded-For", clientIP) + } +} + +var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) + +// isDefault checks to see if the transportProxierFunc is pointing to the default one +func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { + transportProxierPointer := fmt.Sprintf("%p", transportProxier) + return transportProxierPointer == defaultProxyFuncPointer +} + +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + if noProxyEnv == "" { + noProxyEnv = os.Getenv("no_proxy") + } + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + ip := net.ParseIP(req.URL.Hostname()) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// DialerFunc implements Dialer for the provided function. +type DialerFunc func(req *http.Request) (net.Conn, error) + +func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) { + return fn(req) +} + +// Dialer dials a host and writes a request to it. +type Dialer interface { + // Dial connects to the host specified by req's URL, writes the request to the connection, and + // returns the opened net.Conn. + Dial(req *http.Request) (net.Conn, error) +} + +// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to +// originalLocation). It returns the opened net.Conn and the raw response bytes. +// If requireSameHostRedirects is true, only redirects to the same host are permitted. 
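For the client-IP helpers above, a rough usage sketch; the addresses are illustrative RFC 5737 values, not taken from the patch:

```go
package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/healthz", nil)
	req.RemoteAddr = "10.0.0.5:52718"
	req.Header.Set("X-Forwarded-For", "203.0.113.7, 198.51.100.2")

	// X-Forwarded-For wins over X-Real-Ip and RemoteAddr; the first valid entry is used.
	fmt.Println(utilnet.GetClientIP(req)) // 203.0.113.7
}
```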
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { + const ( + maxRedirects = 9 // Fail on the 10th redirect + maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers + ) + + var ( + location = originalLocation + method = originalMethod + intermediateConn net.Conn + rawResponse = bytes.NewBuffer(make([]byte, 0, 256)) + body = originalBody + ) + + defer func() { + if intermediateConn != nil { + intermediateConn.Close() + } + }() + +redirectLoop: + for redirects := 0; ; redirects++ { + if redirects > maxRedirects { + return nil, nil, fmt.Errorf("too many redirects (%d)", redirects) + } + + req, err := http.NewRequest(method, location.String(), body) + if err != nil { + return nil, nil, err + } + + req.Header = header + + intermediateConn, err = dialer.Dial(req) + if err != nil { + return nil, nil, err + } + + // Peek at the backend response. + rawResponse.Reset() + respReader := bufio.NewReader(io.TeeReader( + io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes. + rawResponse)) // Save the raw response. + resp, err := http.ReadResponse(respReader, nil) + if err != nil { + // Unable to read the backend response; let the client handle it. + klog.Warningf("Error reading backend response: %v", err) + break redirectLoop + } + + switch resp.StatusCode { + case http.StatusFound: + // Redirect, continue. + default: + // Don't redirect. + break redirectLoop + } + + // Redirected requests switch to "GET" according to the HTTP spec: + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 + method = "GET" + // don't send a body when following redirects + body = nil + + resp.Body.Close() // not used + + // Prepare to follow the redirect. + redirectStr := resp.Header.Get("Location") + if redirectStr == "" { + return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode) + } + // We have to parse relative to the current location, NOT originalLocation. For example, + // if we request http://foo.com/a and get back "http://bar.com/b", the result should be + // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result + // should be http://bar.com/c, not http://foo.com/c. + location, err = location.Parse(redirectStr) + if err != nil { + return nil, nil, fmt.Errorf("malformed Location header: %v", err) + } + + // Only follow redirects to the same host. Otherwise, propagate the redirect response back. + if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { + break redirectLoop + } + + // Reset the connection. + intermediateConn.Close() + intermediateConn = nil + } + + connToReturn := intermediateConn + intermediateConn = nil // Don't close the connection when we return it. + return connToReturn, rawResponse.Bytes(), nil +} + +// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers. +func CloneRequest(req *http.Request) *http.Request { + r := new(http.Request) + + // shallow clone + *r = *req + + // deep copy headers + r.Header = CloneHeader(req.Header) + + return r +} + +// CloneHeader creates a deep copy of an http.Header. 
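And a brief sketch of CloneRequest: the copy is shallow except for the headers, so mutating the clone's header map does not touch the original (the header values are made up):

```go
package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	orig, _ := http.NewRequest("GET", "http://example.com", nil)
	orig.Header.Set("Authorization", "Bearer original")

	clone := utilnet.CloneRequest(orig)
	clone.Header.Set("Authorization", "Bearer rewritten")

	fmt.Println(orig.Header.Get("Authorization"))  // Bearer original
	fmt.Println(clone.Header.Get("Authorization")) // Bearer rewritten
}
```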
+func CloneHeader(in http.Header) http.Header { + out := make(http.Header, len(in)) + for key, values := range in { + newValues := make([]string, len(values)) + copy(newValues, values) + out[key] = newValues + } + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go new file mode 100644 index 000000000000..daf5d2496455 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -0,0 +1,416 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + + "strings" + + "k8s.io/klog" +) + +type AddressFamily uint + +const ( + familyIPv4 AddressFamily = 4 + familyIPv6 AddressFamily = 6 +) + +const ( + ipv4RouteFile = "/proc/net/route" + ipv6RouteFile = "/proc/net/ipv6_route" +) + +type Route struct { + Interface string + Destination net.IP + Gateway net.IP + Family AddressFamily +} + +type RouteFile struct { + name string + parse func(input io.Reader) ([]Route, error) +} + +// noRoutesError can be returned by ChooseBindAddress() in case of no routes +type noRoutesError struct { + message string +} + +func (e noRoutesError) Error() string { + return e.message +} + +// IsNoRoutesError checks if an error is of type noRoutesError +func IsNoRoutesError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case noRoutesError: + return true + default: + return false + } +} + +var ( + v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes} + v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes} +) + +func (rf RouteFile) extract() ([]Route, error) { + file, err := os.Open(rf.name) + if err != nil { + return nil, err + } + defer file.Close() + return rf.parse(file) +} + +// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes. 
+func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + //ignore the headers in the route info + if strings.HasPrefix(line, "Iface") { + continue + } + fields := strings.Fields(line) + // Interested in fields: + // 0 - interface name + // 1 - destination address + // 2 - gateway + dest, err := parseIP(fields[1], familyIPv4) + if err != nil { + return nil, err + } + gw, err := parseIP(fields[2], familyIPv4) + if err != nil { + return nil, err + } + if !dest.Equal(net.IPv4zero) { + continue + } + routes = append(routes, Route{ + Interface: fields[0], + Destination: dest, + Gateway: gw, + Family: familyIPv4, + }) + } + return routes, nil +} + +func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + fields := strings.Fields(line) + // Interested in fields: + // 0 - destination address + // 4 - gateway + // 9 - interface name + dest, err := parseIP(fields[0], familyIPv6) + if err != nil { + return nil, err + } + gw, err := parseIP(fields[4], familyIPv6) + if err != nil { + return nil, err + } + if !dest.Equal(net.IPv6zero) { + continue + } + if gw.Equal(net.IPv6zero) { + continue // loopback + } + routes = append(routes, Route{ + Interface: fields[9], + Destination: dest, + Gateway: gw, + Family: familyIPv6, + }) + } + return routes, nil +} + +// parseIP takes the hex IP address string from route file and converts it +// to a net.IP address. For IPv4, the value must be converted to big endian. +func parseIP(str string, family AddressFamily) (net.IP, error) { + if str == "" { + return nil, fmt.Errorf("input is nil") + } + bytes, err := hex.DecodeString(str) + if err != nil { + return nil, err + } + if family == familyIPv4 { + if len(bytes) != net.IPv4len { + return nil, fmt.Errorf("invalid IPv4 address in route") + } + return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil + } + // Must be IPv6 + if len(bytes) != net.IPv6len { + return nil, fmt.Errorf("invalid IPv6 address in route") + } + return net.IP(bytes), nil +} + +func isInterfaceUp(intf *net.Interface) bool { + if intf == nil { + return false + } + if intf.Flags&net.FlagUp != 0 { + klog.V(4).Infof("Interface %v is up", intf.Name) + return true + } + return false +} + +func isLoopbackOrPointToPoint(intf *net.Interface) bool { + return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 +} + +// getMatchingGlobalIP returns the first valid global unicast address of the given +// 'family' from the list of 'addrs'. +func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { + if len(addrs) > 0 { + for i := range addrs { + klog.V(4).Infof("Checking addr %s.", addrs[i].String()) + ip, _, err := net.ParseCIDR(addrs[i].String()) + if err != nil { + return nil, err + } + if memberOf(ip, family) { + if ip.IsGlobalUnicast() { + klog.V(4).Infof("IP found %v", ip) + return ip, nil + } else { + klog.V(4).Infof("Non-global unicast address found %v", ip) + } + } else { + klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + } + + } + } + return nil, nil +} + +// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The +// interface must be up, the IP must in the family requested, and the IP must be a global unicast address. 
+func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) { + intf, err := nw.InterfaceByName(intfName) + if err != nil { + return nil, err + } + if isInterfaceUp(intf) { + addrs, err := nw.Addrs(intf) + if err != nil { + return nil, err + } + klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + matchingIP, err := getMatchingGlobalIP(addrs, forFamily) + if err != nil { + return nil, err + } + if matchingIP != nil { + klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + return matchingIP, nil + } + } + return nil, nil +} + +// memberOF tells if the IP is of the desired family. Used for checking interface addresses. +func memberOf(ip net.IP, family AddressFamily) bool { + if ip.To4() != nil { + return family == familyIPv4 + } else { + return family == familyIPv6 + } +} + +// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that +// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. +// Searches for IPv4 addresses, and then IPv6 addresses. +func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { + intfs, err := nw.Interfaces() + if err != nil { + return nil, err + } + if len(intfs) == 0 { + return nil, fmt.Errorf("no interfaces found on host.") + } + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, intf := range intfs { + if !isInterfaceUp(&intf) { + klog.V(4).Infof("Skipping: down interface %q", intf.Name) + continue + } + if isLoopbackOrPointToPoint(&intf) { + klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + continue + } + addrs, err := nw.Addrs(&intf) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + continue + } + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) + } + if !memberOf(ip, family) { + klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + continue + } + // TODO: Decide if should open up to allow IPv6 LLAs in future. + if !ip.IsGlobalUnicast() { + klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + continue + } + klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + return ip, nil + } + } + } + return nil, fmt.Errorf("no acceptable interface with global unicast address found on host") +} + +// ChooseHostInterface is a method used fetch an IP for a daemon. +// If there is no routing info file, it will choose a global IP from the system +// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the +// IP of the interface with a gateway on it (with priority given to IPv4). For a node +// with no internet connection, it returns error. +func ChooseHostInterface() (net.IP, error) { + var nw networkInterfacer = networkInterface{} + if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { + return chooseIPFromHostInterfaces(nw) + } + routes, err := getAllDefaultRoutes() + if err != nil { + return nil, err + } + return chooseHostInterfaceFromRoute(routes, nw) +} + +// networkInterfacer defines an interface for several net library functions. 
Production +// code will forward to net library functions, and unit tests will override the methods +// for testing purposes. +type networkInterfacer interface { + InterfaceByName(intfName string) (*net.Interface, error) + Addrs(intf *net.Interface) ([]net.Addr, error) + Interfaces() ([]net.Interface, error) +} + +// networkInterface implements the networkInterfacer interface for production code, just +// wrapping the underlying net library function calls. +type networkInterface struct{} + +func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return net.InterfaceByName(intfName) +} + +func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + return intf.Addrs() +} + +func (_ networkInterface) Interfaces() ([]net.Interface, error) { + return net.Interfaces() +} + +// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable +// to read the IPv4 routing info file, we return an error. If unable to read the IPv6 +// routing info file (which is optional), we'll just use the IPv4 route information. +// Using all the routing info, if no default routes are found, an error is returned. +func getAllDefaultRoutes() ([]Route, error) { + routes, err := v4File.extract() + if err != nil { + return nil, err + } + v6Routes, _ := v6File.extract() + routes = append(routes, v6Routes...) + if len(routes) == 0 { + return nil, noRoutesError{ + message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name), + } + } + return routes, nil +} + +// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a +// global IP address from the interface for the route. Will first look all each IPv4 route for +// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. +func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + for _, route := range routes { + if route.Family != family { + continue + } + klog.V(4).Infof("Default route transits interface %q", route.Interface) + finalIP, err := getIPFromInterface(route.Interface, family, nw) + if err != nil { + return nil, err + } + if finalIP != nil { + klog.V(4).Infof("Found active IP %v ", finalIP) + return finalIP, nil + } + } + } + klog.V(4).Infof("No active IP found by looking at default routes") + return nil, fmt.Errorf("unable to select an IP from default routes.") +} + +// If bind-address is usable, return it directly +// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default +// interface. +func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { + hostIP, err := ChooseHostInterface() + if err != nil { + return nil, err + } + bindAddress = hostIP + } + return bindAddress, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go new file mode 100644 index 000000000000..7b6eca89321a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "strconv" + "strings" +) + +// PortRange represents a range of TCP/UDP ports. To represent a single port, +// set Size to 1. +type PortRange struct { + Base int + Size int +} + +// Contains tests whether a given port falls within the PortRange. +func (pr *PortRange) Contains(p int) bool { + return (p >= pr.Base) && ((p - pr.Base) < pr.Size) +} + +// String converts the PortRange to a string representation, which can be +// parsed by PortRange.Set or ParsePortRange. +func (pr PortRange) String() string { + if pr.Size == 0 { + return "" + } + return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1) +} + +// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and +// sets the PortRange from it. This is part of the flag.Value and pflag.Value +// interfaces. +func (pr *PortRange) Set(value string) error { + const ( + SinglePortNotation = 1 << iota + HyphenNotation + PlusNotation + ) + + value = strings.TrimSpace(value) + hyphenIndex := strings.Index(value, "-") + plusIndex := strings.Index(value, "+") + + if value == "" { + pr.Base = 0 + pr.Size = 0 + return nil + } + + var err error + var low, high int + var notation int + + if plusIndex == -1 && hyphenIndex == -1 { + notation |= SinglePortNotation + } + if hyphenIndex != -1 { + notation |= HyphenNotation + } + if plusIndex != -1 { + notation |= PlusNotation + } + + switch notation { + case SinglePortNotation: + var port int + port, err = strconv.Atoi(value) + if err != nil { + return err + } + low = port + high = port + case HyphenNotation: + low, err = strconv.Atoi(value[:hyphenIndex]) + if err != nil { + return err + } + high, err = strconv.Atoi(value[hyphenIndex+1:]) + if err != nil { + return err + } + case PlusNotation: + var offset int + low, err = strconv.Atoi(value[:plusIndex]) + if err != nil { + return err + } + offset, err = strconv.Atoi(value[plusIndex+1:]) + if err != nil { + return err + } + high = low + offset + default: + return fmt.Errorf("unable to parse port range: %s", value) + } + + if low > 65535 || high > 65535 { + return fmt.Errorf("the port range cannot be greater than 65535: %s", value) + } + + if high < low { + return fmt.Errorf("end port cannot be less than start port: %s", value) + } + + pr.Base = low + pr.Size = 1 + high - low + return nil +} + +// Type returns a descriptive string about this type. This is part of the +// pflag.Value interface. +func (*PortRange) Type() string { + return "portRange" +} + +// ParsePortRange parses a string of the form "min-max", inclusive at both +// ends, and initializs a new PortRange from it. 
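[Editorial aside, not part of the vendored file: `PortRange.Set` accepts "value", "min-max", and "min+offset" notations, and `ParsePortRange` (defined just below) wraps it. A small illustrative sketch; the `utilnet` alias and the example port numbers are assumptions.]

```go
package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// "min-max" notation, inclusive at both ends.
	pr, err := utilnet.ParsePortRange("30000-32767")
	if err != nil {
		panic(err)
	}
	fmt.Println(pr.Contains(30080)) // true
	fmt.Println(pr.Contains(33000)) // false
	fmt.Println(pr.String())        // "30000-32767"

	// "min+offset" notation is also accepted by Set.
	var alt utilnet.PortRange
	if err := alt.Set("8000+99"); err != nil {
		panic(err)
	}
	fmt.Println(alt.Base, alt.Size) // 8000 100
}
```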
+func ParsePortRange(value string) (*PortRange, error) { + pr := &PortRange{} + err := pr.Set(value) + if err != nil { + return nil, err + } + return pr, nil +} + +func ParsePortRangeOrDie(value string) *PortRange { + pr, err := ParsePortRange(value) + if err != nil { + panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err)) + } + return pr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go new file mode 100644 index 000000000000..c0fd4e20fe54 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var validSchemes = sets.NewString("http", "https", "") + +// SplitSchemeNamePort takes a string of the following forms: +// * "", returns "", "","", true +// * ":", returns "", "","",true +// * "::", returns "","","",true +// +// Name must be non-empty or valid will be returned false. +// Scheme must be "http" or "https" if specified +// Port is returned as a string, and it is not required to be numeric (could be +// used for a named port, for example). +func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) { + parts := strings.Split(id, ":") + switch len(parts) { + case 1: + name = parts[0] + case 2: + name = parts[0] + port = parts[1] + case 3: + scheme = parts[0] + name = parts[1] + port = parts[2] + default: + return "", "", "", false + } + + if len(name) > 0 && validSchemes.Has(scheme) { + return scheme, name, port, true + } else { + return "", "", "", false + } +} + +// JoinSchemeNamePort returns a string that specifies the scheme, name, and port: +// * "" +// * ":" +// * "::" +// None of the parameters may contain a ':' character +// Name is required +// Scheme must be "", "http", or "https" +func JoinSchemeNamePort(scheme, name, port string) string { + if len(scheme) > 0 { + // Must include three segments to specify scheme + return scheme + ":" + name + ":" + port + } + if len(port) > 0 { + // Must include two segments to specify port + return name + ":" + port + } + // Return name alone + return name +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go new file mode 100644 index 000000000000..8344d10c83ae --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -0,0 +1,56 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
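[Editorial aside on the port_split.go helpers above, not part of the vendored file: `SplitSchemeNamePort` and `JoinSchemeNamePort` are inverses for "scheme:name:port" identifiers. A hypothetical sketch; the `utilnet` alias and example values are assumptions.]

```go
package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// "scheme:name:port" splits into its three parts; the port may be a
	// named (non-numeric) port.
	scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:kubernetes:443")
	fmt.Println(scheme, name, port, valid) // https kubernetes 443 true

	// An empty name makes the identifier invalid.
	_, _, _, valid = utilnet.SplitSchemeNamePort("https::443")
	fmt.Println(valid) // false

	// JoinSchemeNamePort rebuilds the identifier, omitting empty parts.
	fmt.Println(utilnet.JoinSchemeNamePort("https", "kubernetes", "443")) // https:kubernetes:443
	fmt.Println(utilnet.JoinSchemeNamePort("", "kubernetes", ""))         // kubernetes
}
```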
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "net" + "net/url" + "os" + "reflect" + "syscall" +) + +// IPNetEqual checks if the two input IPNets are representing the same subnet. +// For example, +// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. +// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. +func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { + if ipnet1 == nil || ipnet2 == nil { + return false + } + if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { + return true + } + return false +} + +// Returns if the given err is "connection reset by peer" error. +func IsConnectionReset(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNRESET { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go new file mode 100644 index 000000000000..8e34f9261397 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -0,0 +1,173 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "runtime" + "sync" + "time" + + "k8s.io/klog" +) + +var ( + // ReallyCrash controls the behavior of HandleCrash and now defaults + // true. It's still exposed so components can optionally set to false + // to restore prior behavior. + ReallyCrash = true +) + +// PanicHandlers is a list of functions which will be invoked when a panic happens. +var PanicHandlers = []func(interface{}){logPanic} + +// HandleCrash simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// TODO: remove this function. We are switching to a world where it's safe for +// apiserver to panic, since it will be restarted by kubelet. At the beginning +// of the Kubernetes project, nothing was going to restart apiserver and so +// catching panics was important. But it's actually much simpler for monitoring +// software if we just exit when an unexpected panic happens. +func HandleCrash(additionalHandlers ...func(interface{})) { + if r := recover(); r != nil { + for _, fn := range PanicHandlers { + fn(r) + } + for _, fn := range additionalHandlers { + fn(r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) + } + } +} + +// logPanic logs the caller tree when a panic occurs. 
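[Editorial aside, not part of the vendored file: `HandleCrash` above is normally deferred at the top of a goroutine so a panic is logged by the registered `PanicHandlers` before the process crashes (since `ReallyCrash` defaults to true). A minimal sketch; the `utilruntime` alias, `doWork`, and the sleep are illustrative assumptions.]

```go
package main

import (
	"fmt"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	go func() {
		// Logs the panic via PanicHandlers, runs the extra handler, and then
		// re-panics because ReallyCrash defaults to true.
		defer utilruntime.HandleCrash(func(r interface{}) {
			fmt.Println("cleaning up after panic:", r)
		})
		doWork()
	}()
	time.Sleep(100 * time.Millisecond)
}

// doWork stands in for real work that might panic unexpectedly.
func doWork() {
	panic("something unexpected happened")
}
```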
+func logPanic(r interface{}) { + callers := getCallers(r) + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%v", r, callers) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + } +} + +func getCallers(r interface{}) string { + callers := "" + for i := 0; true; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + callers = callers + fmt.Sprintf("%v:%v\n", file, line) + } + + return callers +} + +// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// error occurs. +// TODO(lavalamp): for testability, this and the below HandleError function +// should be packaged up into a testable and reusable object. +var ErrorHandlers = []func(error){ + logError, + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError, +} + +// HandlerError is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. +func HandleError(err error) { + // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead + if err == nil { + return + } + + for _, fn := range ErrorHandlers { + fn(err) + } +} + +// logError prints an error with the call stack of the location it was reported +func logError(err error) { + klog.ErrorDepth(2, err) +} + +type rudimentaryErrorBackoff struct { + minPeriod time.Duration // immutable + // TODO(lavalamp): use the clock for testability. Need to move that + // package for that to be accessible here. + lastErrorTimeLock sync.Mutex + lastErrorTime time.Time +} + +// OnError will block if it is called more often than the embedded period time. +// This will prevent overly tight hot error loops. +func (r *rudimentaryErrorBackoff) OnError(error) { + r.lastErrorTimeLock.Lock() + defer r.lastErrorTimeLock.Unlock() + d := time.Since(r.lastErrorTime) + if d < r.minPeriod { + // If the time moves backwards for any reason, do nothing + time.Sleep(r.minPeriod - d) + } + r.lastErrorTime = time.Now() +} + +// GetCaller returns the caller of the function that calls it. +func GetCaller() string { + var pc [1]uintptr + runtime.Callers(3, pc[:]) + f := runtime.FuncForPC(pc[0]) + if f == nil { + return fmt.Sprintf("Unable to find caller") + } + return f.Name() +} + +// RecoverFromPanic replaces the specified error with an error containing the +// original error, and the call tree when a panic occurs. This enables error +// handlers to handle errors and panics the same way. +func RecoverFromPanic(err *error) { + if r := recover(); r != nil { + callers := getCallers(r) + + *err = fmt.Errorf( + "recovered from panic %q. (err=%v) Call stack:\n%v", + r, + *err, + callers) + } +} + +// Must panics on non-nil errors. Useful to handling programmer level errors. +func Must(err error) { + if err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go new file mode 100644 index 000000000000..766f4501e0f2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -0,0 +1,203 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +type Byte map[byte]Empty + +// NewByte creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + ss := Byte{} + ss.Insert(items...) + return ss +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet(theMap interface{}) Byte { + v := reflect.ValueOf(theMap) + ret := Byte{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(byte)) + } + return ret +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Byte) Difference(s2 Byte) Byte { + result := NewByte() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + result := NewByte() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + var walk, other Byte + result := NewByte() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. 
+// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfByte []byte + +func (s sortableSliceOfByte) Len() int { return len(s) } +func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } +func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + res := make(sortableSliceOfByte, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []byte(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Byte) PopAny() (byte, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue byte + return zeroValue, false +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} + +func lessByte(lhs, rhs byte) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go new file mode 100644 index 000000000000..b152a0bf00f2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go new file mode 100644 index 000000000000..e11e622c5ba0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. 
+type Empty struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go new file mode 100644 index 000000000000..a0a513cd9b51 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -0,0 +1,203 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +type Int map[int]Empty + +// NewInt creates a Int from a list of values. +func NewInt(items ...int) Int { + ss := Int{} + ss.Insert(items...) + return ss +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet(theMap interface{}) Int { + v := reflect.ValueOf(theMap) + ret := Int{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int)) + } + return ret +} + +// Insert adds items to the set. +func (s Int) Insert(items ...int) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int) HasAny(items ...int) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int) Difference(s2 Int) Int { + result := NewInt() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + result := NewInt() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + var walk, other Int + result := NewInt() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt []int + +func (s sortableSliceOfInt) Len() int { return len(s) } +func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } +func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int slice. +func (s Int) List() []int { + res := make(sortableSliceOfInt, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int) PopAny() (int, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} + +func lessInt(lhs, rhs int) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go new file mode 100644 index 000000000000..9ca9af0c5918 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -0,0 +1,203 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +type Int64 map[int64]Empty + +// NewInt64 creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + ss := Int64{} + ss.Insert(items...) + return ss +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). 
+// If the value passed in is not actually a map, this will panic. +func Int64KeySet(theMap interface{}) Int64 { + v := reflect.ValueOf(theMap) + ret := Int64{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int64)) + } + return ret +} + +// Insert adds items to the set. +func (s Int64) Insert(items ...int64) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int64) HasAny(items ...int64) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int64) Difference(s2 Int64) Int64 { + result := NewInt64() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + result := NewInt64() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + var walk, other Int64 + result := NewInt64() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt64 []int64 + +func (s sortableSliceOfInt64) Len() int { return len(s) } +func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } +func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + res := make(sortableSliceOfInt64, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int64(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. 
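[Editorial aside, not part of the vendored files: the generated set types in this package (Byte, Int, Int64, and the String variant that follows) all expose the same API. A quick illustrative sketch using the Int64 variant; the example values are assumptions.]

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	a := sets.NewInt64(1, 2, 3)
	b := sets.NewInt64(3, 4)

	a.Insert(5)
	fmt.Println(a.Has(2))                 // true
	fmt.Println(a.Difference(b).List())   // [1 2 5]
	fmt.Println(a.Union(b).List())        // [1 2 3 4 5]
	fmt.Println(a.Intersection(b).List()) // [3]
}
```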
+func (s Int64) PopAny() (int64, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int64 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int64) Len() int { + return len(s) +} + +func lessInt64(lhs, rhs int64) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go new file mode 100644 index 000000000000..ba00ad7df4e7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -0,0 +1,203 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go new file mode 100644 index 000000000000..4767fd1dda10 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -0,0 +1,259 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. 
+type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. +func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = v.Type.String() + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. +type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). 
+ ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. +func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). +func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} +} + +// InternalError returns a *Error indicating "internal error". This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. 
It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. +type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go new file mode 100644 index 000000000000..2efc8eec76d9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -0,0 +1,91 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "bytes" + "fmt" + "strconv" +) + +// Path represents the path from some root to a particular field. +type Path struct { + name string // the name of this field or "" if this is an index + index string // if name == "", this is a subscript (index or map key) of the previous element + parent *Path // nil if this is the root element +} + +// NewPath creates a root Path object. +func NewPath(name string, moreNames ...string) *Path { + r := &Path{name: name, parent: nil} + for _, anotherName := range moreNames { + r = &Path{name: anotherName, parent: r} + } + return r +} + +// Root returns the root element of this Path. +func (p *Path) Root() *Path { + for ; p.parent != nil; p = p.parent { + // Do nothing. + } + return p +} + +// Child creates a new Path that is a child of the method receiver. +func (p *Path) Child(name string, moreNames ...string) *Path { + r := NewPath(name, moreNames...) + r.Root().parent = p + return r +} + +// Index indicates that the previous Path is to be subscripted by an int. +// This sets the same underlying value as Key. 
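[Editorial aside, not part of the vendored file: `Path` builds dotted/subscripted field paths and the `field` error constructors above attach messages to them. A small sketch of typical usage; the field names and messages are illustrative assumptions.]

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Build a path like "spec.ports[0].name" and attach errors to it.
	root := field.NewPath("spec")
	portName := root.Child("ports").Index(0).Child("name")

	var errs field.ErrorList
	errs = append(errs, field.Invalid(portName, "HTTP", "must be lower case"))
	errs = append(errs, field.Required(root.Child("selector"), "a selector is required"))

	// ToAggregate flattens the list into a single error value.
	fmt.Println(errs.ToAggregate())
}
```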
+func (p *Path) Index(index int) *Path { + return &Path{index: strconv.Itoa(index), parent: p} +} + +// Key indicates that the previous Path is to be subscripted by a string. +// This sets the same underlying value as Index. +func (p *Path) Key(key string) *Path { + return &Path{index: key, parent: p} +} + +// String produces a string representation of the Path. +func (p *Path) String() string { + // make a slice to iterate + elems := []*Path{} + for ; p != nil; p = p.parent { + elems = append(elems, p) + } + + // iterate, but it has to be backwards + buf := bytes.NewBuffer(nil) + for i := range elems { + p := elems[len(elems)-1-i] + if p.parent != nil && len(p.name) > 0 { + // This is either the root or it is a subscript. + buf.WriteString(".") + } + if len(p.name) > 0 { + buf.WriteString(p.name) + } else { + fmt.Fprintf(buf, "[%s]", p.index) + } + } + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go new file mode 100644 index 000000000000..2dd99992dcad --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -0,0 +1,416 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. +func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +// IsFullyQualifiedName checks if the name is fully qualified. +func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 3 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) + } + return allErrors +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). 
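[Editorial aside, not part of the vendored file: these validators return a slice of human-readable messages, empty or nil when the value is valid. A brief sketch using `IsQualifiedName` and `IsDNS1123Label` from above; the example inputs are assumptions.]

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// A valid qualified name with a DNS subdomain prefix returns no errors.
	fmt.Println(validation.IsQualifiedName("example.com/my-name")) // []

	// An invalid DNS-1123 label returns a list of messages explaining why.
	for _, msg := range validation.IsDNS1123Label("Not_A_Label") {
		fmt.Println(msg)
	}
}
```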
+func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. +func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// IsInRange tests that the argument is in an inclusive range. +func IsInRange(value int, min int, max int) []string { + if value >= min && value <= max { + return nil + } + return []string{InclusiveRangeError(min, max)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. 
+const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupID tests that the argument is a valid Unix GID. +func IsValidGroupID(gid int64) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserID tests that the argument is a valid Unix UID. +func IsValidUserID(uid int64) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. +func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + } + return nil +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +// IsValidPercent checks that string is in the form of a percentage +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). +func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" +const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" + +var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") + +// IsEnvVarName tests if a string is a valid environment variable name. 
+func IsEnvVarName(value string) []string { + var errs []string + if !envVarNameRegexp.MatchString(value) { + errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) + } + + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. +func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} + +func hasChDirPrefix(value string) []string { + var errs []string + switch { + case value == ".": + errs = append(errs, `must not be '.'`) + case value == "..": + errs = append(errs, `must not be '..'`) + case strings.HasPrefix(value, ".."): + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// IsValidSocketAddr checks that string represents a valid socket address +// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) +func IsValidSocketAddr(value string) []string { + var errs []string + ip, port, err := net.SplitHostPort(value) + if err != nil { + errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + return errs + } + portInt, _ := strconv.Atoi(port) + errs = append(errs, IsValidPortNum(portInt)...) + errs = append(errs, IsValidIP(ip)...) + return errs +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go new file mode 100644 index 000000000000..7e6bf3fb95dc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package watch contains a generic watchable interface, and a fake for +// testing code that uses the watch interface. +package watch // import "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go new file mode 100644 index 000000000000..22c9449f59cb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -0,0 +1,105 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" +) + +// FilterFunc should take an event, possibly modify it in some way, and return +// the modified event. If the event should be ignored, then return keep=false. +type FilterFunc func(in Event) (out Event, keep bool) + +// Filter passes all events through f before allowing them to pass on. +// Putting a filter on a watch, as an unavoidable side-effect due to the way +// go channels work, effectively causes the watch's event channel to have its +// queue length increased by one. +// +// WARNING: filter has a fatal flaw, in that it can't properly update the +// Type field (Add/Modified/Deleted) to reflect items beginning to pass the +// filter when they previously didn't. +// +func Filter(w Interface, f FilterFunc) Interface { + fw := &filteredWatch{ + incoming: w, + result: make(chan Event), + f: f, + } + go fw.loop() + return fw +} + +type filteredWatch struct { + incoming Interface + result chan Event + f FilterFunc +} + +// ResultChan returns a channel which will receive filtered events. +func (fw *filteredWatch) ResultChan() <-chan Event { + return fw.result +} + +// Stop stops the upstream watch, which will eventually stop this watch. +func (fw *filteredWatch) Stop() { + fw.incoming.Stop() +} + +// loop waits for new values, filters them, and resends them. +func (fw *filteredWatch) loop() { + defer close(fw.result) + for event := range fw.incoming.ResultChan() { + filtered, keep := fw.f(event) + if keep { + fw.result <- filtered + } + } +} + +// Recorder records all events that are sent from the watch until it is closed. +type Recorder struct { + Interface + + lock sync.Mutex + events []Event +} + +var _ Interface = &Recorder{} + +// NewRecorder wraps an Interface and records any changes sent across it. +func NewRecorder(w Interface) *Recorder { + r := &Recorder{} + r.Interface = Filter(w, r.record) + return r +} + +// record is a FilterFunc and tracks each received event. 
+func (r *Recorder) record(in Event) (Event, bool) { + r.lock.Lock() + defer r.lock.Unlock() + r.events = append(r.events, in) + return in, true +} + +// Events returns a copy of the events sent across this recorder. +func (r *Recorder) Events() []Event { + r.lock.Lock() + defer r.lock.Unlock() + copied := make([]Event, len(r.events)) + copy(copied, r.events) + return copied +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go new file mode 100644 index 000000000000..0ac8dc4ef9e7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -0,0 +1,260 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch +// channel is full. +type FullChannelBehavior int + +const ( + WaitIfChannelFull FullChannelBehavior = iota + DropIfChannelFull +) + +// Buffer the incoming queue a little bit even though it should rarely ever accumulate +// anything, just in case a few events are received in such a short window that +// Broadcaster can't move them onto the watchers' queues fast enough. +const incomingQueueLength = 25 + +// Broadcaster distributes event notifications among any number of watchers. Every event +// is delivered to every watcher. +type Broadcaster struct { + // TODO: see if this lock is needed now that new watchers go through + // the incoming channel. + lock sync.Mutex + + watchers map[int64]*broadcasterWatcher + nextWatcher int64 + distributing sync.WaitGroup + + incoming chan Event + + // How large to make watcher's channel. + watchQueueLength int + // If one of the watch channels is full, don't wait for it to become empty. + // Instead just deliver it to the watchers that do have space in their + // channels and move on to the next event. + // It's more fair to do this on a per-watcher basis than to do it on the + // "incoming" channel, which would allow one slow watcher to prevent all + // other watchers from getting new events. + fullChannelBehavior FullChannelBehavior +} + +// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher. +// It is guaranteed that events will be distributed in the order in which they occur, +// but the order in which a single event is distributed among all of the watchers is unspecified. +func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster { + m := &Broadcaster{ + watchers: map[int64]*broadcasterWatcher{}, + incoming: make(chan Event, incomingQueueLength), + watchQueueLength: queueLength, + fullChannelBehavior: fullChannelBehavior, + } + m.distributing.Add(1) + go m.loop() + return m +} + +const internalRunFunctionMarker = "internal-do-function" + +// a function type we can shoehorn into the queue. 
+type functionFakeRuntimeObject func() + +func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind +} +func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { + if obj == nil { + return nil + } + // funcs are immutable. Hence, just return the original func. + return obj +} + +// Execute f, blocking the incoming queue (and waiting for it to drain first). +// The purpose of this terrible hack is so that watchers added after an event +// won't ever see that event, and will always see any event after they are +// added. +func (b *Broadcaster) blockQueue(f func()) { + var wg sync.WaitGroup + wg.Add(1) + b.incoming <- Event{ + Type: internalRunFunctionMarker, + Object: functionFakeRuntimeObject(func() { + defer wg.Done() + f() + }), + } + wg.Wait() +} + +// Watch adds a new watcher to the list and returns an Interface for it. +// Note: new watchers will only receive new events. They won't get an entire history +// of previous events. +func (m *Broadcaster) Watch() Interface { + var w *broadcasterWatcher + m.blockQueue(func() { + m.lock.Lock() + defer m.lock.Unlock() + id := m.nextWatcher + m.nextWatcher++ + w = &broadcasterWatcher{ + result: make(chan Event, m.watchQueueLength), + stopped: make(chan struct{}), + id: id, + m: m, + } + m.watchers[id] = w + }) + return w +} + +// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends +// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster. +// The returned watch will have a queue length that is at least large enough to accommodate +// all of the items in queuedEvents. +func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { + var w *broadcasterWatcher + m.blockQueue(func() { + m.lock.Lock() + defer m.lock.Unlock() + id := m.nextWatcher + m.nextWatcher++ + length := m.watchQueueLength + if n := len(queuedEvents) + 1; n > length { + length = n + } + w = &broadcasterWatcher{ + result: make(chan Event, length), + stopped: make(chan struct{}), + id: id, + m: m, + } + m.watchers[id] = w + for _, e := range queuedEvents { + w.result <- e + } + }) + return w +} + +// stopWatching stops the given watcher and removes it from the list. +func (m *Broadcaster) stopWatching(id int64) { + m.lock.Lock() + defer m.lock.Unlock() + w, ok := m.watchers[id] + if !ok { + // No need to do anything, it's already been removed from the list. + return + } + delete(m.watchers, id) + close(w.result) +} + +// closeAll disconnects all watchers (presumably in response to a Shutdown call). +func (m *Broadcaster) closeAll() { + m.lock.Lock() + defer m.lock.Unlock() + for _, w := range m.watchers { + close(w.result) + } + // Delete everything from the map, since presence/absence in the map is used + // by stopWatching to avoid double-closing the channel. + m.watchers = map[int64]*broadcasterWatcher{} +} + +// Action distributes the given event among all watchers. +func (m *Broadcaster) Action(action EventType, obj runtime.Object) { + m.incoming <- Event{action, obj} +} + +// Shutdown disconnects all watchers (but any queued events will still be distributed). +// You must not call Action or Watch* after calling Shutdown. This call blocks +// until all events have been distributed through the outbound channels. Note +// that since they can be buffered, this means that the watchers might not +// have received the data yet as it can remain sitting in the buffered +// channel. 
+func (m *Broadcaster) Shutdown() { + close(m.incoming) + m.distributing.Wait() +} + +// loop receives from m.incoming and distributes to all watchers. +func (m *Broadcaster) loop() { + // Deliberately not catching crashes here. Yes, bring down the process if there's a + // bug in watch.Broadcaster. + for event := range m.incoming { + if event.Type == internalRunFunctionMarker { + event.Object.(functionFakeRuntimeObject)() + continue + } + m.distribute(event) + } + m.closeAll() + m.distributing.Done() +} + +// distribute sends event to all watchers. Blocking. +func (m *Broadcaster) distribute(event Event) { + m.lock.Lock() + defer m.lock.Unlock() + if m.fullChannelBehavior == DropIfChannelFull { + for _, w := range m.watchers { + select { + case w.result <- event: + case <-w.stopped: + default: // Don't block if the event can't be queued. + } + } + } else { + for _, w := range m.watchers { + select { + case w.result <- event: + case <-w.stopped: + } + } + } +} + +// broadcasterWatcher handles a single watcher of a broadcaster +type broadcasterWatcher struct { + result chan Event + stopped chan struct{} + stop sync.Once + id int64 + m *Broadcaster +} + +// ResultChan returns a channel to use for waiting on events. +func (mw *broadcasterWatcher) ResultChan() <-chan Event { + return mw.result +} + +// Stop stops watching and removes mw from its list. +func (mw *broadcasterWatcher) Stop() { + mw.stop.Do(func() { + close(mw.stopped) + mw.m.stopWatching(mw.id) + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go new file mode 100644 index 000000000000..d61cf5a2e58b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -0,0 +1,119 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "io" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" +) + +// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. +type Decoder interface { + // Decode should return the type of event, the decoded object, or an error. + // An error will cause StreamWatcher to call Close(). Decode should block until + // it has data or an error occurs. + Decode() (action EventType, object runtime.Object, err error) + + // Close should close the underlying io.Reader, signalling to the source of + // the stream that it is no longer being watched. Close() must cause any + // outstanding call to Decode() to return with an error of some sort. + Close() +} + +// StreamWatcher turns any stream for which you can write a Decoder interface +// into a watch.Interface. +type StreamWatcher struct { + sync.Mutex + source Decoder + result chan Event + stopped bool +} + +// NewStreamWatcher creates a StreamWatcher from the given decoder. 
+func NewStreamWatcher(d Decoder) *StreamWatcher { + sw := &StreamWatcher{ + source: d, + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. + result: make(chan Event), + } + go sw.receive() + return sw +} + +// ResultChan implements Interface. +func (sw *StreamWatcher) ResultChan() <-chan Event { + return sw.result +} + +// Stop implements Interface. +func (sw *StreamWatcher) Stop() { + // Call Close() exactly once by locking and setting a flag. + sw.Lock() + defer sw.Unlock() + if !sw.stopped { + sw.stopped = true + sw.source.Close() + } +} + +// stopping returns true if Stop() was called previously. +func (sw *StreamWatcher) stopping() bool { + sw.Lock() + defer sw.Unlock() + return sw.stopped +} + +// receive reads result from the decoder in a loop and sends down the result channel. +func (sw *StreamWatcher) receive() { + defer close(sw.result) + defer sw.Stop() + defer utilruntime.HandleCrash() + for { + action, obj, err := sw.source.Decode() + if err != nil { + // Ignore expected error. + if sw.stopping() { + return + } + switch err { + case io.EOF: + // watch closed normally + case io.ErrUnexpectedEOF: + klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + default: + msg := "Unable to decode an event from the watch stream: %v" + if net.IsProbableEOF(err) { + klog.V(5).Infof(msg, err) + } else { + klog.Errorf(msg, err) + } + } + return + } + sw.result <- Event{ + Type: action, + Object: obj, + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go new file mode 100644 index 000000000000..be9c90c03d10 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -0,0 +1,317 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "fmt" + "sync" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Interface can be implemented by anything that knows how to watch and report changes. +type Interface interface { + // Stops watching. Will close the channel returned by ResultChan(). Releases + // any resources used by the watch. + Stop() + + // Returns a chan which will receive all the events. If an error occurs + // or Stop() is called, this channel will be closed, in which case the + // watch should be completely cleaned up. + ResultChan() <-chan Event +} + +// EventType defines the possible types of events. +type EventType string + +const ( + Added EventType = "ADDED" + Modified EventType = "MODIFIED" + Deleted EventType = "DELETED" + Error EventType = "ERROR" + + DefaultChanSize int32 = 100 +) + +// Event represents a single event to a watched resource. +// +k8s:deepcopy-gen=true +type Event struct { + Type EventType + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. 
+ // * If Type is Error: *api.Status is recommended; other types may make sense + // depending on context. + Object runtime.Object +} + +type emptyWatch chan Event + +// NewEmptyWatch returns a watch interface that returns no results and is closed. +// May be used in certain error conditions where no information is available but +// an error is not warranted. +func NewEmptyWatch() Interface { + ch := make(chan Event) + close(ch) + return emptyWatch(ch) +} + +// Stop implements Interface +func (w emptyWatch) Stop() { +} + +// ResultChan implements Interface +func (w emptyWatch) ResultChan() <-chan Event { + return chan Event(w) +} + +// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type FakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewFake() *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event), + } +} + +func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + +// Stop implements Interface.Stop(). +func (f *FakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + klog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *FakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event) +} + +func (f *FakeWatcher) ResultChan() <-chan Event { + return f.result +} + +// Add sends an add event. +func (f *FakeWatcher) Add(obj runtime.Object) { + f.result <- Event{Added, obj} +} + +// Modify sends a modify event. +func (f *FakeWatcher) Modify(obj runtime.Object) { + f.result <- Event{Modified, obj} +} + +// Delete sends a delete event. +func (f *FakeWatcher) Delete(lastValue runtime.Object) { + f.result <- Event{Deleted, lastValue} +} + +// Error sends an Error event. +func (f *FakeWatcher) Error(errValue runtime.Object) { + f.result <- Event{Error, errValue} +} + +// Action sends an event of the requested type, for table-based testing. +func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { + f.result <- Event{action, obj} +} + +// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type RaceFreeFakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewRaceFreeFake() *RaceFreeFakeWatcher { + return &RaceFreeFakeWatcher{ + result: make(chan Event, DefaultChanSize), + } +} + +// Stop implements Interface.Stop(). +func (f *RaceFreeFakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + klog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *RaceFreeFakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *RaceFreeFakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event, DefaultChanSize) +} + +func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event { + f.Lock() + defer f.Unlock() + return f.result +} + +// Add sends an add event. +func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Added, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Modify sends a modify event. 
+func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Modified, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Delete sends a delete event. +func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Deleted, lastValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Error sends an Error event. +func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Error, errValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Action sends an event of the requested type, for table-based testing. +func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{action, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe. +type ProxyWatcher struct { + result chan Event + stopCh chan struct{} + + mutex sync.Mutex + stopped bool +} + +var _ Interface = &ProxyWatcher{} + +// NewProxyWatcher creates new ProxyWatcher by wrapping a channel +func NewProxyWatcher(ch chan Event) *ProxyWatcher { + return &ProxyWatcher{ + result: ch, + stopCh: make(chan struct{}), + stopped: false, + } +} + +// Stop implements Interface +func (pw *ProxyWatcher) Stop() { + pw.mutex.Lock() + defer pw.mutex.Unlock() + if !pw.stopped { + pw.stopped = true + close(pw.stopCh) + } +} + +// Stopping returns true if Stop() has been called +func (pw *ProxyWatcher) Stopping() bool { + pw.mutex.Lock() + defer pw.mutex.Unlock() + return pw.stopped +} + +// ResultChan implements Interface +func (pw *ProxyWatcher) ResultChan() <-chan Event { + return pw.result +} + +// StopChan returns stop channel +func (pw *ProxyWatcher) StopChan() <-chan struct{} { + return pw.stopCh +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go new file mode 100644 index 000000000000..71ef4da3348d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -0,0 +1,40 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package watch + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + if in.Object != nil { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. 
+func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go new file mode 100644 index 000000000000..7ed1d1cffec7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -0,0 +1,388 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package reflect is a fork of go's standard library reflection package, which +// allows for deep equal with equality functions defined. +package reflect + +import ( + "fmt" + "reflect" + "strings" +) + +// Equalities is a map from type to a function comparing two values of +// that type. +type Equalities map[reflect.Type]reflect.Value + +// For convenience, panics on errrors +func EqualitiesOrDie(funcs ...interface{}) Equalities { + e := Equalities{} + if err := e.AddFuncs(funcs...); err != nil { + panic(err) + } + return e +} + +// AddFuncs is a shortcut for multiple calls to AddFunc. +func (e Equalities) AddFuncs(funcs ...interface{}) error { + for _, f := range funcs { + if err := e.AddFunc(f); err != nil { + return err + } + } + return nil +} + +// AddFunc uses func as an equality function: it must take +// two parameters of the same type, and return a boolean. +func (e Equalities) AddFunc(eqFunc interface{}) error { + fv := reflect.ValueOf(eqFunc) + ft := fv.Type() + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 2 { + return fmt.Errorf("expected two 'in' params, got: %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got: %v", ft) + } + if ft.In(0) != ft.In(1) { + return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft) + } + var forReturnType bool + boolType := reflect.TypeOf(forReturnType) + if ft.Out(0) != boolType { + return fmt.Errorf("expected bool return, got: %v", ft) + } + e[ft.In(0)] = fv + return nil +} + +// Below here is forked from go's reflect/deepequal.go + +// During deepValueEqual, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited comparisons are stored in a map indexed by visit. +type visit struct { + a1 uintptr + a2 uintptr + typ reflect.Type +} + +// unexportedTypePanic is thrown when you use this DeepEqual on something that has an +// unexported type. It indicates a programmer error, so should not occur at runtime, +// which is why it's not public and thus impossible to catch. +type unexportedTypePanic []reflect.Type + +func (u unexportedTypePanic) Error() string { return u.String() } +func (u unexportedTypePanic) String() string { + strs := make([]string, len(u)) + for i, t := range u { + strs[i] = fmt.Sprintf("%v", t) + } + return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ") +} + +func makeUsefulPanic(v reflect.Value) { + if x := recover(); x != nil { + if u, ok := x.(unexportedTypePanic); ok { + u = append(unexportedTypePanic{v.Type()}, u...) + x = u + } + panic(x) + } +} + +// Tests for deep equality using reflected types. The map argument tracks +// comparisons that have already been seen, which allows short circuiting on +// recursive types. 
+func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + defer makeUsefulPanic(v1) + + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + if fv, ok := e[v1.Type()]; ok { + return fv.Call([]reflect.Value{v1, v2})[0].Bool() + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + // We don't need to check length here because length is part of + // an array's type, which has already been filtered for. + for i := 0; i < v1.Len(); i++ { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Interface: + if v1.IsNil() || v2.IsNil() { + return v1.IsNil() == v2.IsNil() + } + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + default: + // Normal equality suffices + if !v1.CanInterface() || !v2.CanInterface() { + panic(unexportedTypePanic{}) + } + return v1.Interface() == v2.Interface() + } +} + +// DeepEqual is like reflect.DeepEqual, but focused on semantic equality +// instead of memory equality. +// +// It will use e's equality functions if it finds types that match. +// +// An empty slice *is* equal to a nil slice for our purposes; same for maps. +// +// Unexported field members cannot be compared and will cause an imformative panic; you must add an Equality +// function for these types. 
+func (e Equalities) DeepEqual(a1, a2 interface{}) bool { + if a1 == nil || a2 == nil { + return a1 == a2 + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return e.deepValueEqual(v1, v2, make(map[visit]bool), 0) +} + +func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + defer makeUsefulPanic(v1) + + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + if fv, ok := e[v1.Type()]; ok { + return fv.Call([]reflect.Value{v1, v2})[0].Bool() + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + // We don't need to check length here because length is part of + // an array's type, which has already been filtered for. + for i := 0; i < v1.Len(); i++ { + if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.String: + if v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + return v1.String() == v2.String() + case reflect.Interface: + if v1.IsNil() { + return true + } + return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + if v1.IsNil() { + return true + } + return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + default: + // Normal equality suffices + if !v1.CanInterface() || !v2.CanInterface() { + panic(unexportedTypePanic{}) + } + return v1.Interface() == v2.Interface() + } +} + +// DeepDerivative is similar to DeepEqual except that unset fields in a1 are +// ignored (not compared). This allows us to focus on the fields that matter to +// the semantic comparison. +// +// The unset fields include a nil pointer and an empty string. 
+func (e Equalities) DeepDerivative(a1, a2 interface{}) bool { + if a1 == nil { + return true + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return e.deepValueDerive(v1, v2, make(map[visit]bool), 0) +} diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 000000000000..3b7ea1b131f2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,7 @@ +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go new file mode 100644 index 000000000000..b99459757e56 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=client.authentication.k8s.io + +package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go new file mode 100644 index 000000000000..e4fbc3ea9d41 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clientauthentication + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go new file mode 100644 index 000000000000..6fb53cecf94a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientauthentication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredentials is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct { + // Response is populated when the transport encounters HTTP status codes, such as 401, + // suggesting previous credentials were invalid. + // +optional + Response *Response + + // Interactive is true when the transport detects the command is being called from an + // interactive prompt. + // +optional + Interactive bool +} + +// ExecCredentialStatus holds credentials for the transport to use. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time + // Token is a bearer token used by the client for request authentication. + // +optional + Token string + // PEM-encoded client TLS certificate. + // +optional + ClientCertificateData string + // PEM-encoded client TLS private key. 
+	// +optional
+	ClientKeyData string
+}
+
+// Response defines metadata about a failed request, including HTTP status code and
+// response headers.
+type Response struct {
+	// Headers holds HTTP headers returned by the server.
+	Header map[string][]string
+	// Code is the HTTP status code returned by the server.
+	Code int32
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
new file mode 100644
index 000000000000..19ab7761400f
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
new file mode 100644
index 000000000000..2acd13dead01
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
new file mode 100644
index 000000000000..921f3a2b94d1
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Spec holds information passed to the plugin by the transport. This contains
+	// request and runtime specific information, such as if the session is interactive.
+	Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+	// Response is populated when the transport encounters HTTP status codes, such as 401,
+	// suggesting previous credentials were invalid.
+	// +optional
+	Response *Response `json:"response,omitempty"`
+
+	// Interactive is true when the transport detects the command is being called from an
+	// interactive prompt.
+	// +optional
+	Interactive bool `json:"interactive,omitempty"`
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between client and exec plugin process. Exec plugin
+// itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+	// Token is a bearer token used by the client for request authentication.
+	Token string `json:"token,omitempty"`
+	// PEM-encoded client TLS certificates (including intermediates, if any).
+	ClientCertificateData string `json:"clientCertificateData,omitempty"`
+	// PEM-encoded private key for the above certificate.
+	ClientKeyData string `json:"clientKeyData,omitempty"`
+}
+
+// Response defines metadata about a failed request, including HTTP status code and
+// response headers.
+type Response struct {
+	// Header holds HTTP headers returned by the server.
+ Header map[string][]string `json:"header,omitempty"` + // Code is the HTTP status code returned by the server. + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000000..461c20b29826 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,176 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err 
:= s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Response)(nil), (*clientauthentication.Response)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Response_To_clientauthentication_Response(a.(*Response), b.(*clientauthentication.Response), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.Response)(nil), (*Response)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_Response_To_v1alpha1_Response(a.(*clientauthentication.Response), b.(*Response), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + if err := Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. +func Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + if err := Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in, out, s) +} + +func autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + out.Response = (*clientauthentication.Response)(unsafe.Pointer(in.Response)) + out.Interactive = in.Interactive + return nil +} + +// Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + out.Response = (*Response)(unsafe.Pointer(in.Response)) + out.Interactive = in.Interactive + return nil +} + +// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) +} + +func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. +func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in, out, s) +} + +func autoConvert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { + out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) + out.Code = in.Code + return nil +} + +// Convert_v1alpha1_Response_To_clientauthentication_Response is an autogenerated conversion function. 
+func Convert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { + return autoConvert_v1alpha1_Response_To_clientauthentication_Response(in, out, s) +} + +func autoConvert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { + out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) + out.Code = in.Code + return nil +} + +// Convert_clientauthentication_Response_To_v1alpha1_Response is an autogenerated conversion function. +func Convert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { + return autoConvert_clientauthentication_Response_To_v1alpha1_Response(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..a73d31b3f14b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,128 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(Response) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Response) DeepCopyInto(out *Response) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. +func (in *Response) DeepCopy() *Response { + if in == nil { + return nil + } + out := new(Response) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000000..dd621a3acda8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go new file mode 100644 index 000000000000..c568a6fc8a4d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -0,0 +1,128 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package clientauthentication + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(Response) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Response) DeepCopyInto(out *Response) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. +func (in *Response) DeepCopy() *Response { + if in == nil { + return nil + } + out := new(Response) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml new file mode 100644 index 000000000000..fc0d2caf33f0 --- /dev/null +++ b/vendor/k8s.io/klog/.travis.yml @@ -0,0 +1,14 @@ +language: go +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) 
+ - diff -u <(echo -n) <(golint $(go list -e ./...)) + - go tool vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md new file mode 100644 index 000000000000..de4711513724 --- /dev/null +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE new file mode 100644 index 000000000000..37ec93a14fdc --- /dev/null +++ b/vendor/k8s.io/klog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS
new file mode 100644
index 000000000000..56b0eb044f86
--- /dev/null
+++ b/vendor/k8s.io/klog/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+
+approvers:
+  - dims
+  - thockin
+  - justinsb
+  - tallclair
+  - piosz
+  - brancz
+  - DirectXMan12
+  - lavalamp
diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md
new file mode 100644
index 000000000000..a747f538a848
--- /dev/null
+++ b/vendor/k8s.io/klog/README.md
@@ -0,0 +1,51 @@
+klog
+====
+
+klog is a permanent fork of https://github.com/golang/glog. The original README from glog is below.
+
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+    https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md new file mode 100644 index 000000000000..b53eb960ce78 --- /dev/null +++ b/vendor/k8s.io/klog/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `klog` is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS new file mode 100644 index 000000000000..520ddb525754 --- /dev/null +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -0,0 +1,20 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +dims +thockin +justinsb +tallclair +piosz +brancz +DirectXMan12 +lavalamp diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go new file mode 100644 index 000000000000..13bcc81a756b --- /dev/null +++ b/vendor/k8s.io/klog/klog.go @@ -0,0 +1,1239 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package klog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. 
+func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. 
+ logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. 
+ t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// InitFlags is for explicitly initializing the flags +func InitFlags(flagset *flag.FlagSet) { + if flagset == nil { + flagset = flag.CommandLine + } + flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") + flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flagset.Var(&logging.verbosity, "v", "log level for V logs") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. 
+ traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ + + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir string + + // If non-empty, specifies the path of the file to write logs. mutually exclusive + // with the log-dir option. + logFile string + + // If true, do not add the prefix headers, useful when used with SetOutput + skipHeaders bool +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. 
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + if l.skipHeaders { + return buf + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) 
+ if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// redirectBuffer is used to set an alternate destination for the logs +type redirectBuffer struct { + w io.Writer +} + +func (rb *redirectBuffer) Sync() error { + return nil +} + +func (rb *redirectBuffer) Flush() error { + return nil +} + +func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { + return rb.w.Write(bytes) +} + +// SetOutput sets the output destination for all severities +func SetOutput(w io.Writer) { + for s := fatalLog; s >= infoLog; s-- { + rb := &redirectBuffer{ + w: w, + } + logging.file[s] = rb + } +} + +// SetOutputBySeverity sets the output destination for specific severity +func SetOutputBySeverity(name string, w io.Writer) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, + } + logging.file[sev] = rb +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. 
+func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. 
+func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. 
+ if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. 
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). 
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go new file mode 100644 index 000000000000..b76a4e10bec0 --- /dev/null +++ b/vendor/k8s.io/klog/klog_file.go @@ -0,0 +1,126 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package klog + +import ( + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +func createLogDirs() { + if logging.logDir != "" { + logDirs = append(logDirs, logging.logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." 
+ tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + if logging.logFile != "" { + f, err := os.Create(logging.logFile) + if err == nil { + return f, logging.logFile, nil + } + return nil, "", fmt.Errorf("log: unable to create log: %v", err) + } + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1e8b6b570808..0f6076b2b39a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -9,7 +9,7 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.16.25 +# github.com/aws/aws-sdk-go v1.16.26 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -174,6 +174,9 @@ github.com/boombuler/barcode/qr github.com/boombuler/barcode/utils # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew +# github.com/gogo/protobuf v1.2.0 +github.com/gogo/protobuf/proto +github.com/gogo/protobuf/sortkeys # github.com/golang/protobuf v0.0.0-20171113180720-1e59b77b52bf github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes @@ -182,6 +185,8 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db github.com/golang/snappy +# github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf +github.com/google/gofuzz # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.0 @@ -278,6 +283,9 @@ github.com/keybase/go-crypto/rsa github.com/keybase/go-crypto/brainpool github.com/keybase/go-crypto/cast5 github.com/keybase/go-crypto/openpgp/elgamal +# github.com/kubernetes-sigs/aws-iam-authenticator v0.3.1-0.20181019024009-82544ec86140 +github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token +github.com/kubernetes-sigs/aws-iam-authenticator/pkg/arn # github.com/mattn/go-isatty v0.0.4 github.com/mattn/go-isatty # github.com/mitchellh/cli v0.0.0-20170803042910-8a539dbef410 @@ -378,5 +386,34 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/transport +# gopkg.in/inf.v0 v0.9.1 +gopkg.in/inf.v0 # gopkg.in/yaml.v2 v2.2.1 gopkg.in/yaml.v2 +# k8s.io/apimachinery v0.0.0-20190204010555-a98ff070d70e +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/fields +k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/watch 
+k8s.io/apimachinery/third_party/forked/golang/reflect
+k8s.io/apimachinery/pkg/util/sets
+k8s.io/apimachinery/pkg/util/validation
+k8s.io/apimachinery/pkg/conversion/queryparams
+k8s.io/apimachinery/pkg/util/errors
+k8s.io/apimachinery/pkg/util/json
+k8s.io/apimachinery/pkg/util/naming
+k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/util/validation/field
+# k8s.io/client-go v10.0.0+incompatible
+k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
+k8s.io/client-go/pkg/apis/clientauthentication
+# k8s.io/klog v0.1.0
+k8s.io/klog

From 797297f5653117bba74f1f4a44d4073630d843ce Mon Sep 17 00:00:00 2001
From: Michael Barrientos
Date: Wed, 6 Feb 2019 09:54:37 -0800
Subject: [PATCH 4/5] aws_eks_cluster_auth add markdown docs

---
 website/aws.erb                               |  3 ++
 website/docs/d/eks_cluster_auth.html.markdown | 44 +++++++++++++++++++
 2 files changed, 47 insertions(+)
 create mode 100644 website/docs/d/eks_cluster_auth.html.markdown

diff --git a/website/aws.erb b/website/aws.erb
index b6b27de73431..7f50f9dcd986 100644
--- a/website/aws.erb
+++ b/website/aws.erb
@@ -176,6 +176,9 @@
                 <li<%= sidebar_current("docs-aws-datasource-eks-cluster") %>>
                   <a href="/docs/providers/aws/d/eks_cluster.html">aws_eks_cluster</a>
                 </li>
+                <li<%= sidebar_current("docs-aws-datasource-eks-cluster-auth") %>>
+                  <a href="/docs/providers/aws/d/eks_cluster_auth.html">aws_eks_cluster_auth</a>
+                </li>
                 <li<%= sidebar_current("docs-aws-datasource-elastic-beanstalk-application") %>>
                   <a href="/docs/providers/aws/d/elastic_beanstalk_application.html">aws_elastic_beanstalk_application</a>
                 </li>
diff --git a/website/docs/d/eks_cluster_auth.html.markdown b/website/docs/d/eks_cluster_auth.html.markdown
new file mode 100644
index 000000000000..f4bd0af3054b
--- /dev/null
+++ b/website/docs/d/eks_cluster_auth.html.markdown
@@ -0,0 +1,44 @@
+---
+layout: "aws"
+page_title: "AWS: aws_eks_cluster_auth"
+sidebar_current: "docs-aws-datasource-eks-cluster-auth"
+description: |-
+  Get an authentication token to communicate with an EKS Cluster
+---
+
+# Data Source: aws_eks_cluster_auth
+
+Get an authentication token to communicate with an EKS cluster.
+
+Uses IAM credentials from the AWS provider to generate a temporary token that is compatible with
+[AWS IAM Authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) authentication.
+This can be used to authenticate to an EKS cluster or to a cluster that has the AWS IAM Authenticator
+server configured.
+
+## Example Usage
+
+```hcl
+
+data "aws_eks_cluster" "example" {
+  name = "example"
+}
+
+data "aws_eks_cluster_auth" "example" {
+  name = "example"
+}
+
+provider "kubernetes" {
+  host = "${data.aws_eks_cluster.example.endpoint}"
+  cluster_ca_certificate = "${base64decode(data.aws_eks_cluster.example.certificate_authority.0.data)}"
+  token = "${data.aws_eks_cluster_auth.example.token}"
+  load_config_file = false
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) The name of the cluster.
+
+## Attributes Reference
+
+* `token` - The token to use to authenticate with the cluster.
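+
+## Example Usage (kubectl)
+
+The token can also be consumed outside of a Terraform provider. The following is only a
+rough sketch: the `null_resource`/`kubectl` wiring and the `--insecure-skip-tls-verify`
+flag are illustrative assumptions for brevity, not part of this data source.
+
+```hcl
+resource "null_resource" "smoke_test" {
+  provisioner "local-exec" {
+    command = "kubectl get nodes --server='${data.aws_eks_cluster.example.endpoint}' --token='${data.aws_eks_cluster_auth.example.token}' --insecure-skip-tls-verify=true"
+  }
+}
+```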
From 28a811fefe36de3f8e3981cdb10cd2c50d88604b Mon Sep 17 00:00:00 2001 From: Michael Barrientos Date: Wed, 6 Feb 2019 11:22:02 -0800 Subject: [PATCH 5/5] aws_eks_cluster_auth test fixes --- aws/data_source_aws_eks_cluster_auth_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/aws/data_source_aws_eks_cluster_auth_test.go b/aws/data_source_aws_eks_cluster_auth_test.go index 8fae1d4d8db2..dd58e5f79795 100644 --- a/aws/data_source_aws_eks_cluster_auth_test.go +++ b/aws/data_source_aws_eks_cluster_auth_test.go @@ -32,13 +32,14 @@ func testAccCheckAwsEksClusterAuthToken(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Can't find EKS Cluster Auth resource: %s", n) + return fmt.Errorf("Not found: %s", n) } if rs.Primary.ID == "" { - return fmt.Errorf("EKS Cluster Auth resource ID not set") + return fmt.Errorf("No resource ID is set") } + name := rs.Primary.Attributes["name"] tok := rs.Primary.Attributes["token"] verifier := token.NewVerifier(name) identity, err := verifier.Verify(tok) @@ -46,7 +47,7 @@ func testAccCheckAwsEksClusterAuthToken(n string) resource.TestCheckFunc { return fmt.Errorf("Error verifying token for cluster %q: %v", name, err) } if identity.ARN == "" { - return fmt.Errorf("Received unexpected blank ARN for token identity") + return fmt.Errorf("Unexpected blank ARN for token identity") } return nil